//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionDivision.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by brute force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::ZeroOrMore,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant-derived "
                                 "loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetic"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

static cl::opt<bool>
ClassifyExpressions("scalar-evolution-classify-expressions",
    cl::Hidden, cl::init(true),
    cl::desc("When printing analysis, include information on every instruction"));

static cl::opt<bool> UseExpensiveRangeSharpening(
    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::init(false),
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scPtrToInt: {
    const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this);
    const SCEV *Op = PtrToInt->getOperand();
    OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
       << *PtrToInt->getType() << ")";
    return;
  }
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    default:
      llvm_unreachable("There are no other nary expression types.");
    }
    OS << "(";
    ListSeparator LS(OpStr);
    for (const SCEV *Op : NAry->operands())
      OS << LS << *Op;
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
      break;
    default:
      // Nothing to print for other nary expressions.
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
    return cast<SCEVAddRecExpr>(this)->getType();
  case scMulExpr:
    return cast<SCEVMulExpr>(this)->getType();
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVMinMaxExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
  Operands[0] = op;
}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCacheValue is a set of pairs of values
/// that have been previously deemed to be "equally complex" by this routine.
/// It is intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or
// greater than RHS, respectively. A three-way result allows recursive
// comparisons to be more efficient.
// If the max analysis depth was reached, return None, assuming we do not know
// if they are equivalent for sure.
static Optional<int>
CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
                      EquivalenceClasses<const Value *> &EqCacheValue,
                      const LoopInfo *const LI, const SCEV *LHS,
                      const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;

  if (Depth > MaxSCEVCompareDepth)
    return None;

  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
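  // As a worked illustration of why consistency matters: when getAddExpr is
  // handed the operand lists {%a, %b} and {%b, %a}, sorting both with this
  // comparator yields the same order, so both calls fold to the one uniqued
  // SCEV for (%a + %b).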
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // an order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LA->getOperand(i), RA->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LC->getOperand(i), RC->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                   RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
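    // For example, comparing (zext i8 %a to i32) against (zext i8 %b to i32)
    // reduces to comparing %a against %b; both sides already share the same
    // getSCEVType(), and the result type plays no part in this ordering.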
    auto X =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getOperand(),
                              RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this routine to depend on where the addresses of various SCEV objects
/// happened to land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;

  // Whether LHS has provably less complexity than RHS.
  auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
    auto Complexity =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT);
    return Complexity && *Complexity < 0;
  };
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (IsLessComplex(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return IsLessComplex(LHS, RHS);
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
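  // For K == 1 the coefficient is BC(It, 1) == It, so only the width of the
  // iteration count needs to be adjusted to the result type.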
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must ensure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  return evaluateAtIteration(makeArrayRef(op_begin(), op_end()), It, SE);
}

const SCEV *
SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
                                    const SCEV *It, ScalarEvolution &SE) {
  assert(Operands.size() > 0);
  const SCEV *Result = Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
                                                     unsigned Depth) {
  assert(Depth <= 1 &&
         "getLosslessPtrToIntExpr() should self-recurse at most once.");

  // We could be called with an integer-typed operand during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
  if (!Op->getType()->isPointerTy())
    return Op;

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  // It isn't legal for optimizations to construct new ptrtoint expressions
  // for non-integral pointers.
  if (getDataLayout().isNonIntegralPointerType(Op->getType()))
    return getCouldNotCompute();

  Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());

  // We can only trivially model ptrtoint if SCEV's effective (integer) type
  // is sufficiently wide to represent all possible pointer values.
  // We could theoretically teach SCEV to truncate wider pointers, but
  // that isn't implemented for now.
  if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) !=
      getDataLayout().getTypeSizeInBits(IntPtrTy))
    return getCouldNotCompute();

  // If not, is this expression something we can't reduce any further?
  if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
    // Perform some basic constant folding. If the operand of the ptr2int cast
    // is a null pointer, don't create a ptr2int SCEV expression (that will be
    // left as-is), but produce a zero constant.
    // NOTE: We could handle a more general case, but lack motivational cases.
    if (isa<ConstantPointerNull>(U->getValue()))
      return getZero(IntPtrTy);

    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
                       "non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.

  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
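      // For example, sinking the cast into the pointer-typed sum (%ptr + 42)
      // visits both operands and rebuilds it as ((ptrtoint %ptr) + 42): the
      // cast ends up applied only to the SCEVUnknown leaf, via visitAddExpr
      // and visitUnknown below.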
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      assert(Expr->getType()->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1);
    }
  };

  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return IntOp;
}

const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) {
  assert(Ty->isIntegerTy() && "Target type must be an integer type!");

  const SCEV *IntOp = getLosslessPtrToIntExpr(Op);
  if (isa<SCEVCouldNotCompute>(IntOp))
    return IntOp;

  return getTruncateOrZeroExtend(IntOp, Ty);
}

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
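  // For example, truncating ((sext i8 %a to i64) + (sext i8 %b to i64)) to
  // i32 yields ((sext i8 %a to i32) + (sext i8 %b to i32)): both inner
  // truncates fold into the existing sign extensions, so no new truncate is
  // introduced and the distribution is a clear win.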
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked at the beginning that ID is not in the cache, it is
    // possible that the recursive calls above inserted it into the cache.
    // So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // Return zero if truncating to known zeros.
  uint32_t MinTrailingZeros = GetMinTrailingZeros(Op);
  if (MinTrailingZeros >= getTypeSizeInBits(Ty))
    return getZero(Ty);

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
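// For example, with an i8 recurrence whose Step has unsigned range [0, 1],
// the limit below is 0 - umax(Step) == 255 in the modular bit width: any
// recurrence value that is ult 255 can be incremented by the step without
// unsigned wrap.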
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it. Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling. This allows normalizing a sign/zero extended AddRec as such:
//   {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
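  // For example, if Start is (8 + %x + Step), the loop below collects
  // DiffOps == {8, %x}, and the PreStart candidate becomes (8 + %x).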
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration. Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt &C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
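    // For example, with BitWidth == 8, C == 0xF4 and TZ == 4, D becomes
    // 0x04: (C - D) == 0xF0 keeps four trailing zero bits, and adding D
    // (which is less than 2^4) back on top of a value with four trailing
    // zero bits cannot carry past them, so the top-level addition cannot
    // wrap.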
C.trunc(TZ).zext(BitWidth) : C; 1559 } 1560 return APInt(BitWidth, 0); 1561 } 1562 1563 // Finds an integer D for an affine AddRec expression {C,+,x} such that the top 1564 // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the 1565 // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p 1566 // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count. 1567 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1568 const APInt &ConstantStart, 1569 const SCEV *Step) { 1570 const unsigned BitWidth = ConstantStart.getBitWidth(); 1571 const uint32_t TZ = SE.GetMinTrailingZeros(Step); 1572 if (TZ) 1573 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth) 1574 : ConstantStart; 1575 return APInt(BitWidth, 0); 1576 } 1577 1578 const SCEV * 1579 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1580 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1581 "This is not an extending conversion!"); 1582 assert(isSCEVable(Ty) && 1583 "This is not a conversion to a SCEVable type!"); 1584 Ty = getEffectiveSCEVType(Ty); 1585 1586 // Fold if the operand is constant. 1587 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1588 return getConstant( 1589 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); 1590 1591 // zext(zext(x)) --> zext(x) 1592 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1593 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1594 1595 // Before doing any expensive analysis, check to see if we've already 1596 // computed a SCEV for this Op and Ty. 1597 FoldingSetNodeID ID; 1598 ID.AddInteger(scZeroExtend); 1599 ID.AddPointer(Op); 1600 ID.AddPointer(Ty); 1601 void *IP = nullptr; 1602 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1603 if (Depth > MaxCastDepth) { 1604 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1605 Op, Ty); 1606 UniqueSCEVs.InsertNode(S, IP); 1607 addToLoopUseLists(S); 1608 return S; 1609 } 1610 1611 // zext(trunc(x)) --> zext(x) or x or trunc(x) 1612 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1613 // It's possible the bits taken off by the truncate were all zero bits. If 1614 // so, we should be able to simplify this further. 1615 const SCEV *X = ST->getOperand(); 1616 ConstantRange CR = getUnsignedRange(X); 1617 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1618 unsigned NewBits = getTypeSizeInBits(Ty); 1619 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains( 1620 CR.zextOrTrunc(NewBits))) 1621 return getTruncateOrZeroExtend(X, Ty, Depth); 1622 } 1623 1624 // If the input value is a chrec scev, and we can prove that the value 1625 // did not overflow the old, smaller, value, we can zero extend all of the 1626 // operands (often constants). 
This allows analysis of something like
1627 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
1628 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1629 if (AR->isAffine()) {
1630 const SCEV *Start = AR->getStart();
1631 const SCEV *Step = AR->getStepRecurrence(*this);
1632 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1633 const Loop *L = AR->getLoop();
1634
1635 if (!AR->hasNoUnsignedWrap()) {
1636 auto NewFlags = proveNoWrapViaConstantRanges(AR);
1637 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1638 }
1639
1640 // If we have special knowledge that this addrec won't overflow,
1641 // we don't need to do any further analysis.
1642 if (AR->hasNoUnsignedWrap())
1643 return getAddRecExpr(
1644 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
1645 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1646
1647 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1648 // Note that this serves two purposes: It filters out loops that are
1649 // simply not analyzable, and it covers the case where this code is
1650 // being called from within backedge-taken count analysis, such that
1651 // attempting to ask for the backedge-taken count would likely result
1652 // in infinite recursion. In the latter case, the analysis code will
1653 // cope with a conservative value, and it will take care to purge
1654 // that value once it has finished.
1655 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1656 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1657 // Manually compute the final value for AR, checking for overflow.
1658
1659 // Check whether the backedge-taken count can be losslessly cast to
1660 // the addrec's type. The count is always unsigned.
1661 const SCEV *CastedMaxBECount =
1662 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
1663 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
1664 CastedMaxBECount, MaxBECount->getType(), Depth);
1665 if (MaxBECount == RecastedMaxBECount) {
1666 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1667 // Check whether Start+Step*MaxBECount has no unsigned overflow.
1668 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
1669 SCEV::FlagAnyWrap, Depth + 1);
1670 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
1671 SCEV::FlagAnyWrap,
1672 Depth + 1),
1673 WideTy, Depth + 1);
1674 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
1675 const SCEV *WideMaxBECount =
1676 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1677 const SCEV *OperandExtendedAdd =
1678 getAddExpr(WideStart,
1679 getMulExpr(WideMaxBECount,
1680 getZeroExtendExpr(Step, WideTy, Depth + 1),
1681 SCEV::FlagAnyWrap, Depth + 1),
1682 SCEV::FlagAnyWrap, Depth + 1);
1683 if (ZAdd == OperandExtendedAdd) {
1684 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1685 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1686 // Return the expression with the addrec on the outside.
1687 return getAddRecExpr(
1688 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1689 Depth + 1),
1690 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1691 AR->getNoWrapFlags());
1692 }
1693 // Similar to above, only this time treat the step value as signed.
1694 // This covers loops that count down.
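// For instance: for an i8 recurrence {10,+,-1} with a max backedge-taken
// count of 10, zext(10 + 10 * (-1)) == 0 == zext(10) + 10 * sext(-1) when
// evaluated in i16, so the comparison below succeeds and the recurrence
// can be marked as not self-wrapping (FlagNW).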
1695 OperandExtendedAdd =
1696 getAddExpr(WideStart,
1697 getMulExpr(WideMaxBECount,
1698 getSignExtendExpr(Step, WideTy, Depth + 1),
1699 SCEV::FlagAnyWrap, Depth + 1),
1700 SCEV::FlagAnyWrap, Depth + 1);
1701 if (ZAdd == OperandExtendedAdd) {
1702 // Cache knowledge of AR NW, which is propagated to this AddRec.
1703 // Negative step causes unsigned wrap, but it still can't self-wrap.
1704 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1705 // Return the expression with the addrec on the outside.
1706 return getAddRecExpr(
1707 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1708 Depth + 1),
1709 getSignExtendExpr(Step, Ty, Depth + 1), L,
1710 AR->getNoWrapFlags());
1711 }
1712 }
1713 }
1714
1715 // Normally, in the cases we can prove no-overflow via a
1716 // backedge guarding condition, we can also compute a backedge
1717 // taken count for the loop. The exceptions are assumptions and
1718 // guards present in the loop -- SCEV is not great at exploiting
1719 // these to compute max backedge taken counts, but can still use
1720 // these to prove lack of overflow. Use this fact to avoid
1721 // doing extra work that may not pay off.
1722 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1723 !AC.assumptions().empty()) {
1724
1725 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1726 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1727 if (AR->hasNoUnsignedWrap()) {
1728 // Same as the nuw case above - duplicated here to avoid a compile-time
1729 // issue. It's not clear that the order of checks matters, but it is
1730 // one of two possible causes of an issue with a change that was
1731 // reverted. Be conservative for the moment.
1732 return getAddRecExpr(
1733 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1734 Depth + 1),
1735 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1736 AR->getNoWrapFlags());
1737 }
1738
1739 // For a negative step, we can extend the operands iff doing so only
1740 // traverses values in the range zext([0,UINT_MAX]).
1741 if (isKnownNegative(Step)) {
1742 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
1743 getSignedRangeMin(Step));
1744 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
1745 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
1746 // Cache knowledge of AR NW, which is propagated to this
1747 // AddRec. Negative step causes unsigned wrap, but it
1748 // still can't self-wrap.
1749 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1750 // Return the expression with the addrec on the outside.
1751 return getAddRecExpr(
1752 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1753 Depth + 1),
1754 getSignExtendExpr(Step, Ty, Depth + 1), L,
1755 AR->getNoWrapFlags());
1756 }
1757 }
1758 }
1759
1760 // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
1761 // if D + (C - D + Step * n) could be proven to not unsigned wrap
1762 // where D maximizes the number of trailing zeros of (C - D + Step * n)
1763 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
1764 const APInt &C = SC->getAPInt();
1765 const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
1766 if (D != 0) {
1767 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1768 const SCEV *SResidual =
1769 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
1770 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1771 return getAddExpr(SZExtD, SZExtR,
1772 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1773 Depth + 1);
1774 }
1775 }
1776
1777 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1778 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1779 return getAddRecExpr(
1780 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
1781 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1782 }
1783 }
1784
1785 // zext(A % B) --> zext(A) % zext(B)
1786 {
1787 const SCEV *LHS;
1788 const SCEV *RHS;
1789 if (matchURem(Op, LHS, RHS))
1790 return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
1791 getZeroExtendExpr(RHS, Ty, Depth + 1));
1792 }
1793
1794 // zext(A / B) --> zext(A) / zext(B).
1795 if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
1796 return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
1797 getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));
1798
1799 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1800 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
1801 if (SA->hasNoUnsignedWrap()) {
1802 // If the addition does not unsign overflow then we can, by definition,
1803 // commute the zero extension with the addition operation.
1804 SmallVector<const SCEV *, 4> Ops;
1805 for (const auto *Op : SA->operands())
1806 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1807 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
1808 }
1809
1810 // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
1811 // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
1812 // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1813 //
1814 // Address arithmetic often contains expressions like
1815 // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
1816 // This transformation is useful while proving that such expressions are
1817 // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
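// For instance: in zext(5 + 4 * X), the non-constant part (4 * X) has at
// least two trailing zero bits, so D = 5 mod 4 = 1 and the expression
// becomes zext(1) + zext(4 + 4 * X); the top-level add cannot wrap because
// (4 + 4 * X) is a multiple of 4 and 1 < 4.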
1818 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1819 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1820 if (D != 0) { 1821 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1822 const SCEV *SResidual = 1823 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1824 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1825 return getAddExpr(SZExtD, SZExtR, 1826 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1827 Depth + 1); 1828 } 1829 } 1830 } 1831 1832 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { 1833 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> 1834 if (SM->hasNoUnsignedWrap()) { 1835 // If the multiply does not unsign overflow then we can, by definition, 1836 // commute the zero extension with the multiply operation. 1837 SmallVector<const SCEV *, 4> Ops; 1838 for (const auto *Op : SM->operands()) 1839 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1840 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); 1841 } 1842 1843 // zext(2^K * (trunc X to iN)) to iM -> 1844 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> 1845 // 1846 // Proof: 1847 // 1848 // zext(2^K * (trunc X to iN)) to iM 1849 // = zext((trunc X to iN) << K) to iM 1850 // = zext((trunc X to i{N-K}) << K)<nuw> to iM 1851 // (because shl removes the top K bits) 1852 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM 1853 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. 1854 // 1855 if (SM->getNumOperands() == 2) 1856 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) 1857 if (MulLHS->getAPInt().isPowerOf2()) 1858 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { 1859 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - 1860 MulLHS->getAPInt().logBase2(); 1861 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); 1862 return getMulExpr( 1863 getZeroExtendExpr(MulLHS, Ty), 1864 getZeroExtendExpr( 1865 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), 1866 SCEV::FlagNUW, Depth + 1); 1867 } 1868 } 1869 1870 // The cast wasn't folded; create an explicit cast node. 1871 // Recompute the insert position, as it may have been invalidated. 1872 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1873 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1874 Op, Ty); 1875 UniqueSCEVs.InsertNode(S, IP); 1876 addToLoopUseLists(S); 1877 return S; 1878 } 1879 1880 const SCEV * 1881 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1882 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1883 "This is not an extending conversion!"); 1884 assert(isSCEVable(Ty) && 1885 "This is not a conversion to a SCEVable type!"); 1886 Ty = getEffectiveSCEVType(Ty); 1887 1888 // Fold if the operand is constant. 1889 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1890 return getConstant( 1891 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1892 1893 // sext(sext(x)) --> sext(x) 1894 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1895 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1896 1897 // sext(zext(x)) --> zext(x) 1898 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1899 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1900 1901 // Before doing any expensive analysis, check to see if we've already 1902 // computed a SCEV for this Op and Ty. 
1903 FoldingSetNodeID ID; 1904 ID.AddInteger(scSignExtend); 1905 ID.AddPointer(Op); 1906 ID.AddPointer(Ty); 1907 void *IP = nullptr; 1908 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1909 // Limit recursion depth. 1910 if (Depth > MaxCastDepth) { 1911 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1912 Op, Ty); 1913 UniqueSCEVs.InsertNode(S, IP); 1914 addToLoopUseLists(S); 1915 return S; 1916 } 1917 1918 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1919 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1920 // It's possible the bits taken off by the truncate were all sign bits. If 1921 // so, we should be able to simplify this further. 1922 const SCEV *X = ST->getOperand(); 1923 ConstantRange CR = getSignedRange(X); 1924 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1925 unsigned NewBits = getTypeSizeInBits(Ty); 1926 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1927 CR.sextOrTrunc(NewBits))) 1928 return getTruncateOrSignExtend(X, Ty, Depth); 1929 } 1930 1931 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1932 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1933 if (SA->hasNoSignedWrap()) { 1934 // If the addition does not sign overflow then we can, by definition, 1935 // commute the sign extension with the addition operation. 1936 SmallVector<const SCEV *, 4> Ops; 1937 for (const auto *Op : SA->operands()) 1938 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); 1939 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); 1940 } 1941 1942 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...)) 1943 // if D + (C - D + x + y + ...) could be proven to not signed wrap 1944 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 1945 // 1946 // For instance, this will bring two seemingly different expressions: 1947 // 1 + sext(5 + 20 * %x + 24 * %y) and 1948 // sext(6 + 20 * %x + 24 * %y) 1949 // to the same form: 1950 // 2 + sext(4 + 20 * %x + 24 * %y) 1951 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1952 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1953 if (D != 0) { 1954 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 1955 const SCEV *SResidual = 1956 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1957 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 1958 return getAddExpr(SSExtD, SSExtR, 1959 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1960 Depth + 1); 1961 } 1962 } 1963 } 1964 // If the input value is a chrec scev, and we can prove that the value 1965 // did not overflow the old, smaller, value, we can sign extend all of the 1966 // operands (often constants). This allows analysis of something like 1967 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } 1968 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1969 if (AR->isAffine()) { 1970 const SCEV *Start = AR->getStart(); 1971 const SCEV *Step = AR->getStepRecurrence(*this); 1972 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1973 const Loop *L = AR->getLoop(); 1974 1975 if (!AR->hasNoSignedWrap()) { 1976 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1977 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); 1978 } 1979 1980 // If we have special knowledge that this addrec won't overflow, 1981 // we don't need to do any further analysis. 
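// For instance: for {0,+,1}<nsw> arising from the loop above, the sext
// folds directly to {0,+,1} in the wider type, with no range analysis.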
1982 if (AR->hasNoSignedWrap())
1983 return getAddRecExpr(
1984 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
1985 getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);
1986
1987 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1988 // Note that this serves two purposes: It filters out loops that are
1989 // simply not analyzable, and it covers the case where this code is
1990 // being called from within backedge-taken count analysis, such that
1991 // attempting to ask for the backedge-taken count would likely result
1992 // in infinite recursion. In the latter case, the analysis code will
1993 // cope with a conservative value, and it will take care to purge
1994 // that value once it has finished.
1995 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1996 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1997 // Manually compute the final value for AR, checking for
1998 // overflow.
1999
2000 // Check whether the backedge-taken count can be losslessly cast to
2001 // the addrec's type. The count is always unsigned.
2002 const SCEV *CastedMaxBECount =
2003 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
2004 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
2005 CastedMaxBECount, MaxBECount->getType(), Depth);
2006 if (MaxBECount == RecastedMaxBECount) {
2007 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
2008 // Check whether Start+Step*MaxBECount has no signed overflow.
2009 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
2010 SCEV::FlagAnyWrap, Depth + 1);
2011 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
2012 SCEV::FlagAnyWrap,
2013 Depth + 1),
2014 WideTy, Depth + 1);
2015 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
2016 const SCEV *WideMaxBECount =
2017 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
2018 const SCEV *OperandExtendedAdd =
2019 getAddExpr(WideStart,
2020 getMulExpr(WideMaxBECount,
2021 getSignExtendExpr(Step, WideTy, Depth + 1),
2022 SCEV::FlagAnyWrap, Depth + 1),
2023 SCEV::FlagAnyWrap, Depth + 1);
2024 if (SAdd == OperandExtendedAdd) {
2025 // Cache knowledge of AR NSW, which is propagated to this AddRec.
2026 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2027 // Return the expression with the addrec on the outside.
2028 return getAddRecExpr(
2029 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2030 Depth + 1),
2031 getSignExtendExpr(Step, Ty, Depth + 1), L,
2032 AR->getNoWrapFlags());
2033 }
2034 // Similar to above, only this time treat the step value as unsigned.
2035 // This covers loops that count up with an unsigned step.
2036 OperandExtendedAdd =
2037 getAddExpr(WideStart,
2038 getMulExpr(WideMaxBECount,
2039 getZeroExtendExpr(Step, WideTy, Depth + 1),
2040 SCEV::FlagAnyWrap, Depth + 1),
2041 SCEV::FlagAnyWrap, Depth + 1);
2042 if (SAdd == OperandExtendedAdd) {
2043 // If AR wraps around then
2044 //
2045 // abs(Step) * MaxBECount > unsigned-max(AR->getType())
2046 // => SAdd != OperandExtendedAdd
2047 //
2048 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
2049 // (SAdd == OperandExtendedAdd => AR is NW)
2050
2051 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
2052
2053 // Return the expression with the addrec on the outside.
2054 return getAddRecExpr(
2055 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2056 Depth + 1),
2057 getZeroExtendExpr(Step, Ty, Depth + 1), L,
2058 AR->getNoWrapFlags());
2059 }
2060 }
2061 }
2062
2063 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2064 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
2065 if (AR->hasNoSignedWrap()) {
2066 // Same as the nsw case above - duplicated here to avoid a compile-time
2067 // issue. It's not clear that the order of checks matters, but it is
2068 // one of two possible causes of an issue with a change that was
2069 // reverted. Be conservative for the moment.
2070 return getAddRecExpr(
2071 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2072 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2073 }
2074
2075 // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
2076 // if D + (C - D + Step * n) could be proven to not signed wrap
2077 // where D maximizes the number of trailing zeros of (C - D + Step * n)
2078 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
2079 const APInt &C = SC->getAPInt();
2080 const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
2081 if (D != 0) {
2082 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
2083 const SCEV *SResidual =
2084 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
2085 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
2086 return getAddExpr(SSExtD, SSExtR,
2087 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
2088 Depth + 1);
2089 }
2090 }
2091
2092 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2093 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2094 return getAddRecExpr(
2095 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2096 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2097 }
2098 }
2099
2100 // If the input value is provably positive and we could not simplify
2101 // away the sext, build a zext instead.
2102 if (isKnownNonNegative(Op))
2103 return getZeroExtendExpr(Op, Ty, Depth + 1);
2104
2105 // The cast wasn't folded; create an explicit cast node.
2106 // Recompute the insert position, as it may have been invalidated.
2107 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2108 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
2109 Op, Ty);
2110 UniqueSCEVs.InsertNode(S, IP);
2111 addToLoopUseLists(S);
2112 return S;
2113 }
2114
2115 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
2116 /// unspecified bits out to the given type.
2117 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
2118 Type *Ty) {
2119 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
2120 "This is not an extending conversion!");
2121 assert(isSCEVable(Ty) &&
2122 "This is not a conversion to a SCEVable type!");
2123 Ty = getEffectiveSCEVType(Ty);
2124
2125 // Sign-extend negative constants.
2126 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
2127 if (SC->getAPInt().isNegative())
2128 return getSignExtendExpr(Op, Ty);
2129
2130 // Peel off a truncate cast.
2131 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
2132 const SCEV *NewOp = T->getOperand();
2133 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
2134 return getAnyExtendExpr(NewOp, Ty);
2135 return getTruncateOrNoop(NewOp, Ty);
2136 }
2137
2138 // Next try a zext cast. If the cast is folded, use it.
2139 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
2140 if (!isa<SCEVZeroExtendExpr>(ZExt))
2141 return ZExt;
2142
2143 // Next try a sext cast. If the cast is folded, use it.
2144 const SCEV *SExt = getSignExtendExpr(Op, Ty);
2145 if (!isa<SCEVSignExtendExpr>(SExt))
2146 return SExt;
2147
2148 // Force the cast to be folded into the operands of an addrec.
2149 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
2150 SmallVector<const SCEV *, 4> Ops;
2151 for (const SCEV *Op : AR->operands())
2152 Ops.push_back(getAnyExtendExpr(Op, Ty));
2153 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
2154 }
2155
2156 // If the expression is obviously signed, use the sext cast value.
2157 if (isa<SCEVSMaxExpr>(Op))
2158 return SExt;
2159
2160 // Absent any other information, use the zext cast value.
2161 return ZExt;
2162 }
2163
2164 /// Process the given Ops list, which is a list of operands to be added under
2165 /// the given scale, and update the given map. This is a helper function for
2166 /// getAddExpr. As an example of what it does, given a sequence of operands
2167 /// that would form an add expression like this:
2168 ///
2169 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
2170 ///
2171 /// where A and B are constants, update the map with these values:
2172 ///
2173 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
2174 ///
2175 /// and add 13 + A*B*29 to AccumulatedConstant.
2176 /// This will allow getAddExpr to produce this:
2177 ///
2178 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
2179 ///
2180 /// This form often exposes folding opportunities that are hidden in
2181 /// the original operand list.
2182 ///
2183 /// Return true iff it appears that any interesting folding opportunities
2184 /// may be exposed. This helps getAddExpr short-circuit extra work in
2185 /// the common case where no interesting opportunities are present, and
2186 /// is also used as a check to avoid infinite recursion.
2187 static bool
2188 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
2189 SmallVectorImpl<const SCEV *> &NewOps,
2190 APInt &AccumulatedConstant,
2191 const SCEV *const *Ops, size_t NumOperands,
2192 const APInt &Scale,
2193 ScalarEvolution &SE) {
2194 bool Interesting = false;
2195
2196 // Iterate over the add operands. They are sorted, with constants first.
2197 unsigned i = 0;
2198 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2199 ++i;
2200 // Pull a buried constant out to the outside.
2201 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
2202 Interesting = true;
2203 AccumulatedConstant += Scale * C->getAPInt();
2204 }
2205
2206 // Next comes everything else. We're especially interested in multiplies
2207 // here, but they're in the middle, so just visit the rest with one loop.
2208 for (; i != NumOperands; ++i) {
2209 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
2210 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
2211 APInt NewScale =
2212 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
2213 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
2214 // A multiplication of a constant with another add; recurse.
2215 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
2216 Interesting |=
2217 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2218 Add->op_begin(), Add->getNumOperands(),
2219 NewScale, SE);
2220 } else {
2221 // A multiplication of a constant with some other value.
Update 2222 // the map. 2223 SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands())); 2224 const SCEV *Key = SE.getMulExpr(MulOps); 2225 auto Pair = M.insert({Key, NewScale}); 2226 if (Pair.second) { 2227 NewOps.push_back(Pair.first->first); 2228 } else { 2229 Pair.first->second += NewScale; 2230 // The map already had an entry for this value, which may indicate 2231 // a folding opportunity. 2232 Interesting = true; 2233 } 2234 } 2235 } else { 2236 // An ordinary operand. Update the map. 2237 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2238 M.insert({Ops[i], Scale}); 2239 if (Pair.second) { 2240 NewOps.push_back(Pair.first->first); 2241 } else { 2242 Pair.first->second += Scale; 2243 // The map already had an entry for this value, which may indicate 2244 // a folding opportunity. 2245 Interesting = true; 2246 } 2247 } 2248 } 2249 2250 return Interesting; 2251 } 2252 2253 bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, 2254 const SCEV *LHS, const SCEV *RHS) { 2255 const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *, 2256 SCEV::NoWrapFlags, unsigned); 2257 switch (BinOp) { 2258 default: 2259 llvm_unreachable("Unsupported binary op"); 2260 case Instruction::Add: 2261 Operation = &ScalarEvolution::getAddExpr; 2262 break; 2263 case Instruction::Sub: 2264 Operation = &ScalarEvolution::getMinusSCEV; 2265 break; 2266 case Instruction::Mul: 2267 Operation = &ScalarEvolution::getMulExpr; 2268 break; 2269 } 2270 2271 const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) = 2272 Signed ? &ScalarEvolution::getSignExtendExpr 2273 : &ScalarEvolution::getZeroExtendExpr; 2274 2275 // Check ext(LHS op RHS) == ext(LHS) op ext(RHS) 2276 auto *NarrowTy = cast<IntegerType>(LHS->getType()); 2277 auto *WideTy = 2278 IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2); 2279 2280 const SCEV *A = (this->*Extension)( 2281 (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0); 2282 const SCEV *B = (this->*Operation)((this->*Extension)(LHS, WideTy, 0), 2283 (this->*Extension)(RHS, WideTy, 0), 2284 SCEV::FlagAnyWrap, 0); 2285 return A == B; 2286 } 2287 2288 std::pair<SCEV::NoWrapFlags, bool /*Deduced*/> 2289 ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp( 2290 const OverflowingBinaryOperator *OBO) { 2291 SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap; 2292 2293 if (OBO->hasNoUnsignedWrap()) 2294 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2295 if (OBO->hasNoSignedWrap()) 2296 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2297 2298 bool Deduced = false; 2299 2300 if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap()) 2301 return {Flags, Deduced}; 2302 2303 if (OBO->getOpcode() != Instruction::Add && 2304 OBO->getOpcode() != Instruction::Sub && 2305 OBO->getOpcode() != Instruction::Mul) 2306 return {Flags, Deduced}; 2307 2308 const SCEV *LHS = getSCEV(OBO->getOperand(0)); 2309 const SCEV *RHS = getSCEV(OBO->getOperand(1)); 2310 2311 if (!OBO->hasNoUnsignedWrap() && 2312 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), 2313 /* Signed */ false, LHS, RHS)) { 2314 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2315 Deduced = true; 2316 } 2317 2318 if (!OBO->hasNoSignedWrap() && 2319 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), 2320 /* Signed */ true, LHS, RHS)) { 2321 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2322 Deduced = true; 2323 } 2324 2325 return {Flags, Deduced}; 2326 } 2327 2328 // We're trying to construct 
a SCEV of type `Type' with `Ops' as operands and
2329 // `Flags' as can't-wrap behavior. Infer a more aggressive set of
2330 // can't-overflow flags for the operation if possible.
2331 static SCEV::NoWrapFlags
2332 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
2333 const ArrayRef<const SCEV *> Ops,
2334 SCEV::NoWrapFlags Flags) {
2335 using namespace std::placeholders;
2336
2337 using OBO = OverflowingBinaryOperator;
2338
2339 bool CanAnalyze =
2340 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
2341 (void)CanAnalyze;
2342 assert(CanAnalyze && "don't call from other places!");
2343
2344 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
2345 SCEV::NoWrapFlags SignOrUnsignWrap =
2346 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2347
2348 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
2349 auto IsKnownNonNegative = [&](const SCEV *S) {
2350 return SE->isKnownNonNegative(S);
2351 };
2352
2353 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
2354 Flags =
2355 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
2356
2357 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2358
2359 if (SignOrUnsignWrap != SignOrUnsignMask &&
2360 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
2361 isa<SCEVConstant>(Ops[0])) {
2362
2363 auto Opcode = [&] {
2364 switch (Type) {
2365 case scAddExpr:
2366 return Instruction::Add;
2367 case scMulExpr:
2368 return Instruction::Mul;
2369 default:
2370 llvm_unreachable("Unexpected SCEV op.");
2371 }
2372 }();
2373
2374 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
2375
2376 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
2377 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
2378 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2379 Opcode, C, OBO::NoSignedWrap);
2380 if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
2381 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2382 }
2383
2384 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
2385 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
2386 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2387 Opcode, C, OBO::NoUnsignedWrap);
2388 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
2389 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2390 }
2391 }
2392
2393 return Flags;
2394 }
2395
2396 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
2397 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
2398 }
2399
2400 /// Get a canonical add expression, or something simpler if possible.
2401 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2402 SCEV::NoWrapFlags OrigFlags,
2403 unsigned Depth) {
2404 assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
2405 "only nuw or nsw allowed");
2406 assert(!Ops.empty() && "Cannot get empty add!");
2407 if (Ops.size() == 1) return Ops[0];
2408 #ifndef NDEBUG
2409 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2410 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2411 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2412 "SCEVAddExpr operand types don't match!");
2413 #endif
2414
2415 // Sort by complexity; this groups all similar expression types together.
2416 GroupByComplexity(Ops, &LI, DT);
2417
2418 // If there are any constants, fold them together.
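// For instance: (1 + 2 + x) folds to (3 + x), and a leftover constant
// zero, as in (0 + x), is stripped off entirely below.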
2419 unsigned Idx = 0;
2420 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2421 ++Idx;
2422 assert(Idx < Ops.size());
2423 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2424 // We found two constants, fold them together!
2425 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2426 if (Ops.size() == 2) return Ops[0];
2427 Ops.erase(Ops.begin()+1); // Erase the folded element
2428 LHSC = cast<SCEVConstant>(Ops[0]);
2429 }
2430
2431 // If we are left with a constant zero being added, strip it off.
2432 if (LHSC->getValue()->isZero()) {
2433 Ops.erase(Ops.begin());
2434 --Idx;
2435 }
2436
2437 if (Ops.size() == 1) return Ops[0];
2438 }
2439
2440 // Delay expensive flag strengthening until necessary.
2441 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
2442 return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
2443 };
2444
2445 // Limit recursion depth.
2446 if (Depth > MaxArithDepth || hasHugeExpression(Ops))
2447 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2448
2449 if (SCEV *S = std::get<0>(findExistingSCEVInCache(scAddExpr, Ops))) {
2450 // Don't strengthen flags if we have no new information.
2451 SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S);
2452 if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2453 Add->setNoWrapFlags(ComputeFlags(Ops));
2454 return S;
2455 }
2456
2457 // Okay, check to see if the same value occurs in the operand list more than
2458 // once. If so, merge them together into a multiply expression. Since we
2459 // sorted the list, these values are required to be adjacent.
2460 Type *Ty = Ops[0]->getType();
2461 bool FoundMatch = false;
2462 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2463 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
2464 // Scan ahead to count how many equal operands there are.
2465 unsigned Count = 2;
2466 while (i+Count != e && Ops[i+Count] == Ops[i])
2467 ++Count;
2468 // Merge the values into a multiply.
2469 const SCEV *Scale = getConstant(Ty, Count);
2470 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
2471 if (Ops.size() == Count)
2472 return Mul;
2473 Ops[i] = Mul;
2474 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2475 --i; e -= Count - 1;
2476 FoundMatch = true;
2477 }
2478 if (FoundMatch)
2479 return getAddExpr(Ops, OrigFlags, Depth + 1);
2480
2481 // Check for truncates. If all the operands are truncated from the same
2482 // type, see if factoring out the truncate would permit the result to be
2483 // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
2484 // if the contents of the resulting outer trunc fold to something simple.
2485 auto FindTruncSrcType = [&]() -> Type * {
2486 // We're ultimately looking to fold an add of truncs and muls of only
2487 // constants and truncs, so if we find any other types of SCEV
2488 // as operands of the add then we bail and return nullptr here.
2489 // Otherwise, we return the type of the operand of a trunc that we find.
2490 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx])) 2491 return T->getOperand()->getType(); 2492 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2493 const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1); 2494 if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp)) 2495 return T->getOperand()->getType(); 2496 } 2497 return nullptr; 2498 }; 2499 if (auto *SrcType = FindTruncSrcType()) { 2500 SmallVector<const SCEV *, 8> LargeOps; 2501 bool Ok = true; 2502 // Check all the operands to see if they can be represented in the 2503 // source type of the truncate. 2504 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 2505 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { 2506 if (T->getOperand()->getType() != SrcType) { 2507 Ok = false; 2508 break; 2509 } 2510 LargeOps.push_back(T->getOperand()); 2511 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2512 LargeOps.push_back(getAnyExtendExpr(C, SrcType)); 2513 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { 2514 SmallVector<const SCEV *, 8> LargeMulOps; 2515 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { 2516 if (const SCEVTruncateExpr *T = 2517 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { 2518 if (T->getOperand()->getType() != SrcType) { 2519 Ok = false; 2520 break; 2521 } 2522 LargeMulOps.push_back(T->getOperand()); 2523 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { 2524 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); 2525 } else { 2526 Ok = false; 2527 break; 2528 } 2529 } 2530 if (Ok) 2531 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1)); 2532 } else { 2533 Ok = false; 2534 break; 2535 } 2536 } 2537 if (Ok) { 2538 // Evaluate the expression in the larger type. 2539 const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1); 2540 // If it folds to something simple, use it. Otherwise, don't. 2541 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) 2542 return getTruncateExpr(Fold, Ty); 2543 } 2544 } 2545 2546 if (Ops.size() == 2) { 2547 // Check if we have an expression of the form ((X + C1) - C2), where C1 and 2548 // C2 can be folded in a way that allows retaining wrapping flags of (X + 2549 // C1). 2550 const SCEV *A = Ops[0]; 2551 const SCEV *B = Ops[1]; 2552 auto *AddExpr = dyn_cast<SCEVAddExpr>(B); 2553 auto *C = dyn_cast<SCEVConstant>(A); 2554 if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) { 2555 auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt(); 2556 auto C2 = C->getAPInt(); 2557 SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap; 2558 2559 APInt ConstAdd = C1 + C2; 2560 auto AddFlags = AddExpr->getNoWrapFlags(); 2561 // Adding a smaller constant is NUW if the original AddExpr was NUW. 2562 if (ScalarEvolution::maskFlags(AddFlags, SCEV::FlagNUW) == 2563 SCEV::FlagNUW && 2564 ConstAdd.ule(C1)) { 2565 PreservedFlags = 2566 ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW); 2567 } 2568 2569 // Adding a constant with the same sign and small magnitude is NSW, if the 2570 // original AddExpr was NSW. 
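// For instance: folding (-2) + (5 + x)<nsw> gives ConstAdd = 3; 3 and 5
// have the same sign and |3| <= |5|, so the result keeps NSW: (3 + x)<nsw>.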
2571 if (ScalarEvolution::maskFlags(AddFlags, SCEV::FlagNSW) ==
2572 SCEV::FlagNSW &&
2573 C1.isSignBitSet() == ConstAdd.isSignBitSet() &&
2574 ConstAdd.abs().ule(C1.abs())) {
2575 PreservedFlags =
2576 ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNSW);
2577 }
2578
2579 if (PreservedFlags != SCEV::FlagAnyWrap) {
2580 SmallVector<const SCEV *, 4> NewOps(AddExpr->op_begin(),
2581 AddExpr->op_end());
2582 NewOps[0] = getConstant(ConstAdd);
2583 return getAddExpr(NewOps, PreservedFlags);
2584 }
2585 }
2586 }
2587
2588 // Skip past any other cast SCEVs.
2589 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2590 ++Idx;
2591
2592 // If there are add operands, they would be next.
2593 if (Idx < Ops.size()) {
2594 bool DeletedAdd = false;
2595 // If the original flags and all inlined SCEVAddExprs are NUW, use the
2596 // common NUW flag for expression after inlining. Other flags cannot be
2597 // preserved, because they may depend on the original order of operations.
2598 SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW);
2599 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2600 if (Ops.size() > AddOpsInlineThreshold ||
2601 Add->getNumOperands() > AddOpsInlineThreshold)
2602 break;
2603 // If we have an add, expand the add operands onto the end of the operands
2604 // list.
2605 Ops.erase(Ops.begin()+Idx);
2606 Ops.append(Add->op_begin(), Add->op_end());
2607 DeletedAdd = true;
2608 CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags());
2609 }
2610
2611 // If we deleted at least one add, we added operands to the end of the list,
2612 // and they are not necessarily sorted. Recurse to resort and resimplify
2613 // any operands we just acquired.
2614 if (DeletedAdd)
2615 return getAddExpr(Ops, CommonFlags, Depth + 1);
2616 }
2617
2618 // Skip over the add expression until we get to a multiply.
2619 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2620 ++Idx;
2621
2622 // Check to see if there are any folding opportunities present with
2623 // operands multiplied by constant values.
2624 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2625 uint64_t BitWidth = getTypeSizeInBits(Ty);
2626 DenseMap<const SCEV *, APInt> M;
2627 SmallVector<const SCEV *, 8> NewOps;
2628 APInt AccumulatedConstant(BitWidth, 0);
2629 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2630 Ops.data(), Ops.size(),
2631 APInt(BitWidth, 1), *this)) {
2632 struct APIntCompare {
2633 bool operator()(const APInt &LHS, const APInt &RHS) const {
2634 return LHS.ult(RHS);
2635 }
2636 };
2637
2638 // Some interesting folding opportunity is present, so it's worthwhile to
2639 // re-generate the operands list. Group the operands by constant scale,
2640 // to avoid multiplying by the same constant scale multiple times.
2641 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2642 for (const SCEV *NewOp : NewOps)
2643 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2644 // Re-generate the operands list.
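// For instance: scales {(x, 2), (y, 2), (z, 3)} with an accumulated
// constant of 5 regenerate as 5 + 2*(x + y) + 3*z.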
2645 Ops.clear();
2646 if (AccumulatedConstant != 0)
2647 Ops.push_back(getConstant(AccumulatedConstant));
2648 for (auto &MulOp : MulOpLists)
2649 if (MulOp.first != 0)
2650 Ops.push_back(getMulExpr(
2651 getConstant(MulOp.first),
2652 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
2653 SCEV::FlagAnyWrap, Depth + 1));
2654 if (Ops.empty())
2655 return getZero(Ty);
2656 if (Ops.size() == 1)
2657 return Ops[0];
2658 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2659 }
2660 }
2661
2662 // If we are adding something to a multiply expression, make sure the
2663 // something is not already an operand of the multiply. If so, merge it into
2664 // the multiply.
2665 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2666 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2667 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2668 const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2669 if (isa<SCEVConstant>(MulOpSCEV))
2670 continue;
2671 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2672 if (MulOpSCEV == Ops[AddOp]) {
2673 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
// Note: getOperand(MulOp == 0) picks the other operand of a two-operand
// multiply (index 1 when MulOp is 0, index 0 otherwise).
2674 const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2675 if (Mul->getNumOperands() != 2) {
2676 // If the multiply has more than two operands, we must get the
2677 // Y*Z term.
2678 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2679 Mul->op_begin()+MulOp);
2680 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2681 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2682 }
2683 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
2684 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2685 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
2686 SCEV::FlagAnyWrap, Depth + 1);
2687 if (Ops.size() == 2) return OuterMul;
2688 if (AddOp < Idx) {
2689 Ops.erase(Ops.begin()+AddOp);
2690 Ops.erase(Ops.begin()+Idx-1);
2691 } else {
2692 Ops.erase(Ops.begin()+Idx);
2693 Ops.erase(Ops.begin()+AddOp-1);
2694 }
2695 Ops.push_back(OuterMul);
2696 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2697 }
2698
2699 // Check this multiply against other multiplies being added together.
2700 for (unsigned OtherMulIdx = Idx+1;
2701 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2702 ++OtherMulIdx) {
2703 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2704 // If MulOp occurs in OtherMul, we can fold the two multiplies
2705 // together.
2706 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2707 OMulOp != e; ++OMulOp) 2708 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2709 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2710 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2711 if (Mul->getNumOperands() != 2) { 2712 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2713 Mul->op_begin()+MulOp); 2714 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2715 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2716 } 2717 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2718 if (OtherMul->getNumOperands() != 2) { 2719 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2720 OtherMul->op_begin()+OMulOp); 2721 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2722 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2723 } 2724 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2725 const SCEV *InnerMulSum = 2726 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2727 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2728 SCEV::FlagAnyWrap, Depth + 1); 2729 if (Ops.size() == 2) return OuterMul; 2730 Ops.erase(Ops.begin()+Idx); 2731 Ops.erase(Ops.begin()+OtherMulIdx-1); 2732 Ops.push_back(OuterMul); 2733 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2734 } 2735 } 2736 } 2737 } 2738 2739 // If there are any add recurrences in the operands list, see if any other 2740 // added values are loop invariant. If so, we can fold them into the 2741 // recurrence. 2742 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2743 ++Idx; 2744 2745 // Scan over all recurrences, trying to fold loop invariants into them. 2746 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2747 // Scan all of the other operands to this add and add them to the vector if 2748 // they are loop invariant w.r.t. the recurrence. 2749 SmallVector<const SCEV *, 8> LIOps; 2750 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2751 const Loop *AddRecLoop = AddRec->getLoop(); 2752 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2753 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2754 LIOps.push_back(Ops[i]); 2755 Ops.erase(Ops.begin()+i); 2756 --i; --e; 2757 } 2758 2759 // If we found some loop invariants, fold them into the recurrence. 2760 if (!LIOps.empty()) { 2761 // Compute nowrap flags for the addition of the loop-invariant ops and 2762 // the addrec. Temporarily push it as an operand for that purpose. 2763 LIOps.push_back(AddRec); 2764 SCEV::NoWrapFlags Flags = ComputeFlags(LIOps); 2765 LIOps.pop_back(); 2766 2767 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2768 LIOps.push_back(AddRec->getStart()); 2769 2770 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands()); 2771 // This follows from the fact that the no-wrap flags on the outer add 2772 // expression are applicable on the 0th iteration, when the add recurrence 2773 // will be equal to its start value. 2774 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1); 2775 2776 // Build the new addrec. Propagate the NUW and NSW flags if both the 2777 // outer add and the inner addrec are guaranteed to have no overflow. 2778 // Always propagate NW. 2779 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); 2780 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); 2781 2782 // If all of the other operands were loop invariant, we are done. 
2783 if (Ops.size() == 1) return NewRec;
2784
2785 // Otherwise, add the folded AddRec to the non-invariant parts.
2786 for (unsigned i = 0;; ++i)
2787 if (Ops[i] == AddRec) {
2788 Ops[i] = NewRec;
2789 break;
2790 }
2791 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2792 }
2793
2794 // Okay, if there weren't any loop invariants to be folded, check to see if
2795 // there are multiple AddRec's with the same loop induction variable being
2796 // added together. If so, we can fold them.
2797 for (unsigned OtherIdx = Idx+1;
2798 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2799 ++OtherIdx) {
2800 // We expect the AddRecExpr's to be sorted in reverse dominance order,
2801 // so that the 1st found AddRecExpr is dominated by all others.
2802 assert(DT.dominates(
2803 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2804 AddRec->getLoop()->getHeader()) &&
2805 "AddRecExprs are not sorted in reverse dominance order?");
2806 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2807 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
2808 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
2809 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2810 ++OtherIdx) {
2811 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2812 if (OtherAddRec->getLoop() == AddRecLoop) {
2813 for (unsigned i = 0, e = OtherAddRec->getNumOperands();
2814 i != e; ++i) {
2815 if (i >= AddRecOps.size()) {
2816 AddRecOps.append(OtherAddRec->op_begin()+i,
2817 OtherAddRec->op_end());
2818 break;
2819 }
2820 SmallVector<const SCEV *, 2> TwoOps = {
2821 AddRecOps[i], OtherAddRec->getOperand(i)};
2822 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2823 }
2824 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2825 }
2826 }
2827 // Step size has changed, so we cannot guarantee no self-wraparound.
2828 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
2829 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2830 }
2831 }
2832
2833 // Otherwise, we couldn't fold anything into this recurrence. Move on to
2834 // the next one.
2835 }
2836
2837 // Okay, it looks like we really DO need an add expr. Check to see if we
2838 // already have one, otherwise create a new one.
2839 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2840 }
2841
2842 const SCEV *
2843 ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
2844 SCEV::NoWrapFlags Flags) {
2845 FoldingSetNodeID ID;
2846 ID.AddInteger(scAddExpr);
2847 for (const SCEV *Op : Ops)
2848 ID.AddPointer(Op);
2849 void *IP = nullptr;
2850 SCEVAddExpr *S =
2851 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2852 if (!S) {
2853 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2854 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2855 S = new (SCEVAllocator)
2856 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
2857 UniqueSCEVs.InsertNode(S, IP);
2858 addToLoopUseLists(S);
2859 }
2860 S->setNoWrapFlags(Flags);
2861 return S;
2862 }
2863
2864 const SCEV *
2865 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
2866 const Loop *L, SCEV::NoWrapFlags Flags) {
2867 FoldingSetNodeID ID;
2868 ID.AddInteger(scAddRecExpr);
2869 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2870 ID.AddPointer(Ops[i]);
2871 ID.AddPointer(L);
2872 void *IP = nullptr;
2873 SCEVAddRecExpr *S =
2874 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2875 if (!S) {
2876 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2877 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2878 S = new (SCEVAllocator)
2879 SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
2880 UniqueSCEVs.InsertNode(S, IP);
2881 addToLoopUseLists(S);
2882 }
2883 setNoWrapFlags(S, Flags);
2884 return S;
2885 }
2886
2887 const SCEV *
2888 ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
2889 SCEV::NoWrapFlags Flags) {
2890 FoldingSetNodeID ID;
2891 ID.AddInteger(scMulExpr);
2892 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2893 ID.AddPointer(Ops[i]);
2894 void *IP = nullptr;
2895 SCEVMulExpr *S =
2896 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2897 if (!S) {
2898 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2899 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2900 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2901 O, Ops.size());
2902 UniqueSCEVs.InsertNode(S, IP);
2903 addToLoopUseLists(S);
2904 }
2905 S->setNoWrapFlags(Flags);
2906 return S;
2907 }
2908
2909 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
2910 uint64_t k = i*j;
2911 if (j > 1 && k / j != i) Overflow = true;
2912 return k;
2913 }
2914
2915 /// Compute the result of "n choose k", the binomial coefficient. If an
2916 /// intermediate computation overflows, Overflow will be set and the return will
2917 /// be garbage. Overflow is not cleared on absence of overflow.
2918 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2919 // We use the multiplicative formula:
2920 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2921 // At each iteration i, we multiply by the i-th factor of the numerator,
2922 // n-(i-1), and divide by the i-th factor of the denominator, i. This
2923 // division always produces an integral result, and helps reduce the
2924 // chance of overflow in the intermediate computations. However, we can
2925 // still overflow even when the final result would fit.
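// For instance: Choose(5, 2): k stays 2 (2 > 5/2 does not hold); i = 1
// gives r = 5/1 = 5, and i = 2 gives r = (5*4)/2 = 10.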
2926
2927 if (n == 0 || n == k) return 1;
2928 if (k > n) return 0;
2929
2930 if (k > n/2)
2931 k = n-k;
2932
2933 uint64_t r = 1;
2934 for (uint64_t i = 1; i <= k; ++i) {
2935 r = umul_ov(r, n-(i-1), Overflow);
2936 r /= i;
2937 }
2938 return r;
2939 }
2940
2941 /// Determine if any of the operands in this SCEV are a constant or if
2942 /// any of the add or multiply expressions in this SCEV contain a constant.
2943 static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
2944 struct FindConstantInAddMulChain {
2945 bool FoundConstant = false;
2946
2947 bool follow(const SCEV *S) {
2948 FoundConstant |= isa<SCEVConstant>(S);
2949 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
2950 }
2951
2952 bool isDone() const {
2953 return FoundConstant;
2954 }
2955 };
2956
2957 FindConstantInAddMulChain F;
2958 SCEVTraversal<FindConstantInAddMulChain> ST(F);
2959 ST.visitAll(StartExpr);
2960 return F.FoundConstant;
2961 }
2962
2963 /// Get a canonical multiply expression, or something simpler if possible.
2964 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
2965 SCEV::NoWrapFlags OrigFlags,
2966 unsigned Depth) {
2967 assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
2968 "only nuw or nsw allowed");
2969 assert(!Ops.empty() && "Cannot get empty mul!");
2970 if (Ops.size() == 1) return Ops[0];
2971 #ifndef NDEBUG
2972 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2973 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2974 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2975 "SCEVMulExpr operand types don't match!");
2976 #endif
2977
2978 // Sort by complexity; this groups all similar expression types together.
2979 GroupByComplexity(Ops, &LI, DT);
2980
2981 // If there are any constants, fold them together.
2982 unsigned Idx = 0;
2983 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2984 ++Idx;
2985 assert(Idx < Ops.size());
2986 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2987 // We found two constants, fold them together!
2988 Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
2989 if (Ops.size() == 2) return Ops[0];
2990 Ops.erase(Ops.begin()+1); // Erase the folded element
2991 LHSC = cast<SCEVConstant>(Ops[0]);
2992 }
2993
2994 // If we have a multiply of zero, it will always be zero.
2995 if (LHSC->getValue()->isZero())
2996 return LHSC;
2997
2998 // If we are left with a constant one being multiplied, strip it off.
2999 if (LHSC->getValue()->isOne()) {
3000 Ops.erase(Ops.begin());
3001 --Idx;
3002 }
3003
3004 if (Ops.size() == 1)
3005 return Ops[0];
3006 }
3007
3008 // Delay expensive flag strengthening until necessary.
3009 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
3010 return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
3011 };
3012
3013 // Limit recursion depth.
3014 if (Depth > MaxArithDepth || hasHugeExpression(Ops))
3015 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3016
3017 if (SCEV *S = std::get<0>(findExistingSCEVInCache(scMulExpr, Ops))) {
3018 // Don't strengthen flags if we have no new information.
3019     SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S);
3020     if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3021       Mul->setNoWrapFlags(ComputeFlags(Ops));
3022     return S;
3023   }
3024
3025   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3026     if (Ops.size() == 2) {
3027       // C1*(C2+V) -> C1*C2 + C1*V
3028       if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
3029         // If any of Add's ops are Adds or Muls with a constant, apply this
3030         // transformation as well.
3031         //
3032         // TODO: There are some cases where this transformation is not
3033         // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
3034         // this transformation should be narrowed down.
3035         if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add))
3036           return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
3037                                        SCEV::FlagAnyWrap, Depth + 1),
3038                             getMulExpr(LHSC, Add->getOperand(1),
3039                                        SCEV::FlagAnyWrap, Depth + 1),
3040                             SCEV::FlagAnyWrap, Depth + 1);
3041
3042       if (Ops[0]->isAllOnesValue()) {
3043         // If we have a mul by -1 of an add, try distributing the -1 among the
3044         // add operands.
3045         if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
3046           SmallVector<const SCEV *, 4> NewOps;
3047           bool AnyFolded = false;
3048           for (const SCEV *AddOp : Add->operands()) {
3049             const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
3050                                          Depth + 1);
3051             if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
3052             NewOps.push_back(Mul);
3053           }
3054           if (AnyFolded)
3055             return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
3056         } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
3057           // Negation preserves a recurrence's no self-wrap property.
3058           SmallVector<const SCEV *, 4> Operands;
3059           for (const SCEV *AddRecOp : AddRec->operands())
3060             Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
3061                                           Depth + 1));
3062
3063           return getAddRecExpr(Operands, AddRec->getLoop(),
3064                                AddRec->getNoWrapFlags(SCEV::FlagNW));
3065         }
3066       }
3067     }
3068   }
3069
3070   // Skip over the add expression until we get to a multiply.
3071   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
3072     ++Idx;
3073
3074   // If there are mul operands, inline them all into this expression.
3075   if (Idx < Ops.size()) {
3076     bool DeletedMul = false;
3077     while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
3078       if (Ops.size() > MulOpsInlineThreshold)
3079         break;
3080       // If we have a mul, expand the mul operands onto the end of the
3081       // operands list.
3082       Ops.erase(Ops.begin()+Idx);
3083       Ops.append(Mul->op_begin(), Mul->op_end());
3084       DeletedMul = true;
3085     }
3086
3087     // If we deleted at least one mul, we added operands to the end of the
3088     // list, and they are not necessarily sorted. Recurse to resort and
3089     // resimplify any operands we just acquired.
3090     if (DeletedMul)
3091       return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3092   }
3093
3094   // If there are any add recurrences in the operands list, see if any other
3095   // added values are loop invariant. If so, we can fold them into the
3096   // recurrence.
3097   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
3098     ++Idx;
3099
3100   // Scan over all recurrences, trying to fold loop invariants into them.
3101   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
3102     // Scan all of the other operands to this mul and add them to the vector
3103     // if they are loop invariant w.r.t. the recurrence.
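    // For instance, with %x invariant in L, (%x * {1,+,2}<L>) collects
    // LIOps = {%x} and the fold below produces {%x,+,(2 * %x)}<L>.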
3104 SmallVector<const SCEV *, 8> LIOps; 3105 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 3106 const Loop *AddRecLoop = AddRec->getLoop(); 3107 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3108 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 3109 LIOps.push_back(Ops[i]); 3110 Ops.erase(Ops.begin()+i); 3111 --i; --e; 3112 } 3113 3114 // If we found some loop invariants, fold them into the recurrence. 3115 if (!LIOps.empty()) { 3116 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 3117 SmallVector<const SCEV *, 4> NewOps; 3118 NewOps.reserve(AddRec->getNumOperands()); 3119 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); 3120 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 3121 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), 3122 SCEV::FlagAnyWrap, Depth + 1)); 3123 3124 // Build the new addrec. Propagate the NUW and NSW flags if both the 3125 // outer mul and the inner addrec are guaranteed to have no overflow. 3126 // 3127 // No self-wrap cannot be guaranteed after changing the step size, but 3128 // will be inferred if either NUW or NSW is true. 3129 SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec}); 3130 const SCEV *NewRec = getAddRecExpr( 3131 NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags)); 3132 3133 // If all of the other operands were loop invariant, we are done. 3134 if (Ops.size() == 1) return NewRec; 3135 3136 // Otherwise, multiply the folded AddRec by the non-invariant parts. 3137 for (unsigned i = 0;; ++i) 3138 if (Ops[i] == AddRec) { 3139 Ops[i] = NewRec; 3140 break; 3141 } 3142 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 3143 } 3144 3145 // Okay, if there weren't any loop invariants to be folded, check to see 3146 // if there are multiple AddRec's with the same loop induction variable 3147 // being multiplied together. If so, we can fold them. 3148 3149 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> 3150 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ 3151 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z 3152 // ]]],+,...up to x=2n}. 3153 // Note that the arguments to choose() are always integers with values 3154 // known at compile time, never SCEV objects. 3155 // 3156 // The implementation avoids pointless extra computations when the two 3157 // addrec's are of different length (mathematically, it's equivalent to 3158 // an infinite stream of zeros on the right). 3159 bool OpsModified = false; 3160 for (unsigned OtherIdx = Idx+1; 3161 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 3162 ++OtherIdx) { 3163 const SCEVAddRecExpr *OtherAddRec = 3164 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); 3165 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) 3166 continue; 3167 3168 // Limit max number of arguments to avoid creation of unreasonably big 3169 // SCEVAddRecs with very complex operands. 
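      // (As a small concrete instance of the formula above:
      // {1,+,1}<L> * {1,+,1}<L>, i.e. (n+1)^2, folds to {1,+,3,+,2}<L>.)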
3170       if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
3171               MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
3172         continue;
3173
3174       bool Overflow = false;
3175       Type *Ty = AddRec->getType();
3176       bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
3177       SmallVector<const SCEV*, 7> AddRecOps;
3178       for (int x = 0, xe = AddRec->getNumOperands() +
3179              OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
3180         SmallVector <const SCEV *, 7> SumOps;
3181         for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3182           uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
3183           for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
3184                  ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
3185                z < ze && !Overflow; ++z) {
3186             uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
3187             uint64_t Coeff;
3188             if (LargerThan64Bits)
3189               Coeff = umul_ov(Coeff1, Coeff2, Overflow);
3190             else
3191               Coeff = Coeff1*Coeff2;
3192             const SCEV *CoeffTerm = getConstant(Ty, Coeff);
3193             const SCEV *Term1 = AddRec->getOperand(y-z);
3194             const SCEV *Term2 = OtherAddRec->getOperand(z);
3195             SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
3196                                         SCEV::FlagAnyWrap, Depth + 1));
3197           }
3198         }
3199         if (SumOps.empty())
3200           SumOps.push_back(getZero(Ty));
3201         AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
3202       }
3203       if (!Overflow) {
3204         const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
3205                                               SCEV::FlagAnyWrap);
3206         if (Ops.size() == 2) return NewAddRec;
3207         Ops[Idx] = NewAddRec;
3208         Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
3209         OpsModified = true;
3210         AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3211         if (!AddRec)
3212           break;
3213       }
3214     }
3215     if (OpsModified)
3216       return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3217
3218     // Otherwise couldn't fold anything into this recurrence. Move on to the
3219     // next one.
3220   }
3221
3222   // Okay, it looks like we really DO need a mul expr. Check to see if we
3223   // already have one, otherwise create a new one.
3224   return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3225 }
3226
3227 /// Represents an unsigned remainder expression based on unsigned division.
3228 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
3229                                          const SCEV *RHS) {
3230   assert(getEffectiveSCEVType(LHS->getType()) ==
3231          getEffectiveSCEVType(RHS->getType()) &&
3232          "SCEVURemExpr operand types don't match!");
3233
3234   // Short-circuit easy cases.
3235   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3236     // If the constant is one, the result is trivial.
3237     if (RHSC->getValue()->isOne())
3238       return getZero(LHS->getType()); // X urem 1 --> 0
3239
3240     // If the constant is a power of two, fold into a zext(trunc(LHS)).
3241     if (RHSC->getAPInt().isPowerOf2()) {
3242       Type *FullTy = LHS->getType();
3243       Type *TruncTy =
3244           IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
3245       return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
3246     }
3247   }
3248
3249   // Fall back to %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y).
3250   const SCEV *UDiv = getUDivExpr(LHS, RHS);
3251   const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
3252   return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
3253 }
3254
3255 /// Get a canonical unsigned division expression, or something simpler if
3256 /// possible.
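/// For example, when the no-overflow checks below succeed, {0,+,8}<L> /u 2
/// folds to {0,+,4}<L>, and (6 * %x) /u 3 folds to (2 * %x).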
3257 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
3258                                          const SCEV *RHS) {
3259   assert(getEffectiveSCEVType(LHS->getType()) ==
3260          getEffectiveSCEVType(RHS->getType()) &&
3261          "SCEVUDivExpr operand types don't match!");
3262
3263   FoldingSetNodeID ID;
3264   ID.AddInteger(scUDivExpr);
3265   ID.AddPointer(LHS);
3266   ID.AddPointer(RHS);
3267   void *IP = nullptr;
3268   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3269     return S;
3270
3271   // 0 udiv Y == 0
3272   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS))
3273     if (LHSC->getValue()->isZero())
3274       return LHS;
3275
3276   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3277     if (RHSC->getValue()->isOne())
3278       return LHS; // X udiv 1 --> X
3279     // If the denominator is zero, the result of the udiv is undefined. Don't
3280     // try to analyze it, because the resolution chosen here may differ from
3281     // the resolution chosen in other parts of the compiler.
3282     if (!RHSC->getValue()->isZero()) {
3283       // Determine if the division can be folded into the operands of
3284       // the LHS.
3285       // TODO: Generalize this to non-constants by using known-bits information.
3286       Type *Ty = LHS->getType();
3287       unsigned LZ = RHSC->getAPInt().countLeadingZeros();
3288       unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
3289       // For non-power-of-two values, effectively round the value up to the
3290       // nearest power of two.
3291       if (!RHSC->getAPInt().isPowerOf2())
3292         ++MaxShiftAmt;
3293       IntegerType *ExtTy =
3294           IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
3295       if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
3296         if (const SCEVConstant *Step =
3297                 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
3298           // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
3299           const APInt &StepInt = Step->getAPInt();
3300           const APInt &DivInt = RHSC->getAPInt();
3301           if (!StepInt.urem(DivInt) &&
3302               getZeroExtendExpr(AR, ExtTy) ==
3303                   getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3304                                 getZeroExtendExpr(Step, ExtTy),
3305                                 AR->getLoop(), SCEV::FlagAnyWrap)) {
3306             SmallVector<const SCEV *, 4> Operands;
3307             for (const SCEV *Op : AR->operands())
3308               Operands.push_back(getUDivExpr(Op, RHS));
3309             return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
3310           }
3311           // Get a canonical UDivExpr for a recurrence.
3312           // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
3313           // We can currently only fold X%N if X is constant.
3314           const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3315           if (StartC && !DivInt.urem(StepInt) &&
3316               getZeroExtendExpr(AR, ExtTy) ==
3317                   getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3318                                 getZeroExtendExpr(Step, ExtTy),
3319                                 AR->getLoop(), SCEV::FlagAnyWrap)) {
3320             const APInt &StartInt = StartC->getAPInt();
3321             const APInt &StartRem = StartInt.urem(StepInt);
3322             if (StartRem != 0) {
3323               const SCEV *NewLHS =
3324                   getAddRecExpr(getConstant(StartInt - StartRem), Step,
3325                                 AR->getLoop(), SCEV::FlagNW);
3326               if (LHS != NewLHS) {
3327                 LHS = NewLHS;
3328
3329                 // Reset the ID to include the new LHS, and check if it is
3330                 // already cached.
3331                 ID.clear();
3332                 ID.AddInteger(scUDivExpr);
3333                 ID.AddPointer(LHS);
3334                 ID.AddPointer(RHS);
3335                 IP = nullptr;
3336                 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3337                   return S;
3338               }
3339             }
3340           }
3341         }
3342       // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
3343 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3344 SmallVector<const SCEV *, 4> Operands; 3345 for (const SCEV *Op : M->operands()) 3346 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3347 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3348 // Find an operand that's safely divisible. 3349 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3350 const SCEV *Op = M->getOperand(i); 3351 const SCEV *Div = getUDivExpr(Op, RHSC); 3352 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3353 Operands = SmallVector<const SCEV *, 4>(M->operands()); 3354 Operands[i] = Div; 3355 return getMulExpr(Operands); 3356 } 3357 } 3358 } 3359 3360 // (A/B)/C --> A/(B*C) if safe and B*C can be folded. 3361 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { 3362 if (auto *DivisorConstant = 3363 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { 3364 bool Overflow = false; 3365 APInt NewRHS = 3366 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); 3367 if (Overflow) { 3368 return getConstant(RHSC->getType(), 0, false); 3369 } 3370 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); 3371 } 3372 } 3373 3374 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3375 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3376 SmallVector<const SCEV *, 4> Operands; 3377 for (const SCEV *Op : A->operands()) 3378 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3379 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3380 Operands.clear(); 3381 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3382 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3383 if (isa<SCEVUDivExpr>(Op) || 3384 getMulExpr(Op, RHS) != A->getOperand(i)) 3385 break; 3386 Operands.push_back(Op); 3387 } 3388 if (Operands.size() == A->getNumOperands()) 3389 return getAddExpr(Operands); 3390 } 3391 } 3392 3393 // Fold if both operands are constant. 3394 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3395 Constant *LHSCV = LHSC->getValue(); 3396 Constant *RHSCV = RHSC->getValue(); 3397 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3398 RHSCV))); 3399 } 3400 } 3401 } 3402 3403 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs 3404 // changes). Make sure we get a new one. 3405 IP = nullptr; 3406 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3407 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3408 LHS, RHS); 3409 UniqueSCEVs.InsertNode(S, IP); 3410 addToLoopUseLists(S); 3411 return S; 3412 } 3413 3414 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3415 APInt A = C1->getAPInt().abs(); 3416 APInt B = C2->getAPInt().abs(); 3417 uint32_t ABW = A.getBitWidth(); 3418 uint32_t BBW = B.getBitWidth(); 3419 3420 if (ABW > BBW) 3421 B = B.zext(ABW); 3422 else if (ABW < BBW) 3423 A = A.zext(BBW); 3424 3425 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3426 } 3427 3428 /// Get a canonical unsigned division expression, or something simpler if 3429 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3430 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3431 /// it's not exact because the udiv may be clearing bits. 3432 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3433 const SCEV *RHS) { 3434 // TODO: we could try to find factors in all sorts of things, but for now we 3435 // just deal with u/exact (multiply, constant). 
See SCEVDivision towards the
3436 // end of this file for inspiration.
3437
3438   const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
3439   if (!Mul || !Mul->hasNoUnsignedWrap())
3440     return getUDivExpr(LHS, RHS);
3441
3442   if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
3443     // If the mulexpr multiplies by a constant, then that constant must be the
3444     // first element of the mulexpr.
3445     if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
3446       if (LHSCst == RHSCst) {
3447         SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands()));
3448         return getMulExpr(Operands);
3449       }
3450
3451       // We can't just assume that LHSCst divides RHSCst cleanly; it could be
3452       // that there's a factor provided by one of the other terms. We need to
3453       // check.
3454       APInt Factor = gcd(LHSCst, RHSCst);
3455       if (!Factor.isIntN(1)) { // i.e., Factor > 1.
3456         LHSCst =
3457             cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
3458         RHSCst =
3459             cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
3460         SmallVector<const SCEV *, 2> Operands;
3461         Operands.push_back(LHSCst);
3462         Operands.append(Mul->op_begin() + 1, Mul->op_end());
3463         LHS = getMulExpr(Operands);
3464         RHS = RHSCst;
3465         Mul = dyn_cast<SCEVMulExpr>(LHS);
3466         if (!Mul)
3467           return getUDivExactExpr(LHS, RHS);
3468       }
3469     }
3470   }
3471
3472   for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
3473     if (Mul->getOperand(i) == RHS) {
3474       SmallVector<const SCEV *, 2> Operands;
3475       Operands.append(Mul->op_begin(), Mul->op_begin() + i);
3476       Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
3477       return getMulExpr(Operands);
3478     }
3479   }
3480
3481   return getUDivExpr(LHS, RHS);
3482 }
3483
3484 /// Get an add recurrence expression for the specified loop. Simplify the
3485 /// expression as much as possible.
3486 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
3487                                            const Loop *L,
3488                                            SCEV::NoWrapFlags Flags) {
3489   SmallVector<const SCEV *, 4> Operands;
3490   Operands.push_back(Start);
3491   if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3492     if (StepChrec->getLoop() == L) {
3493       Operands.append(StepChrec->op_begin(), StepChrec->op_end());
3494       return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
3495     }
3496
3497   Operands.push_back(Step);
3498   return getAddRecExpr(Operands, L, Flags);
3499 }
3500
3501 /// Get an add recurrence expression for the specified loop. Simplify the
3502 /// expression as much as possible.
3503 const SCEV *
3504 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
3505                                const Loop *L, SCEV::NoWrapFlags Flags) {
3506   if (Operands.size() == 1) return Operands[0];
3507 #ifndef NDEBUG
3508   Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3509   for (unsigned i = 1, e = Operands.size(); i != e; ++i)
3510     assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3511            "SCEVAddRecExpr operand types don't match!");
3512   for (unsigned i = 0, e = Operands.size(); i != e; ++i)
3513     assert(isLoopInvariant(Operands[i], L) &&
3514            "SCEVAddRecExpr operand is not loop-invariant!");
3515 #endif
3516
3517   if (Operands.back()->isZero()) {
3518     Operands.pop_back();
3519     return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
3520   }
3521
3522   // It's tempting to want to call getConstantMaxBackedgeTakenCount here and
3523   // use that information to infer NUW and NSW flags.
However, computing a
3524   // BE count requires calling getAddRecExpr, so we may not yet have a
3525   // meaningful BE count at this point (and if we don't, we'd be stuck
3526   // with a SCEVCouldNotCompute as the cached BE count).
3527
3528   Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3529
3530   // Canonicalize nested AddRecs by nesting them in order of loop depth.
3531   if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3532     const Loop *NestedLoop = NestedAR->getLoop();
3533     if (L->contains(NestedLoop)
3534             ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
3535             : (!NestedLoop->contains(L) &&
3536                DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
3537       SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
3538       Operands[0] = NestedAR->getStart();
3539       // AddRecs require their operands be loop-invariant with respect to their
3540       // loops. Don't perform this transformation if it would break this
3541       // requirement.
3542       bool AllInvariant = all_of(
3543           Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
3544
3545       if (AllInvariant) {
3546         // Create a recurrence for the outer loop with the same step size.
3547         //
3548         // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
3549         // inner recurrence has the same property.
3550         SCEV::NoWrapFlags OuterFlags =
3551             maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
3552
3553         NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3554         AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
3555           return isLoopInvariant(Op, NestedLoop);
3556         });
3557
3558         if (AllInvariant) {
3559           // Ok, both add recurrences are valid after the transformation.
3560           //
3561           // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
3562           // the outer recurrence has the same property.
3563           SCEV::NoWrapFlags InnerFlags =
3564               maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
3565           return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3566         }
3567       }
3568       // Reset Operands to its original state.
3569       Operands[0] = NestedAR;
3570     }
3571   }
3572
3573   // Okay, it looks like we really DO need an addrec expr. Check to see if we
3574   // already have one, otherwise create a new one.
3575   return getOrCreateAddRecExpr(Operands, L, Flags);
3576 }
3577
3578 const SCEV *
3579 ScalarEvolution::getGEPExpr(GEPOperator *GEP,
3580                             const SmallVectorImpl<const SCEV *> &IndexExprs) {
3581   const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
3582   // getSCEV(Base)->getType() has the same address space as Base->getType()
3583   // because SCEV::getType() preserves the address space.
3584   Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
3585   // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
3586   // instruction to its SCEV, because the Instruction may be guarded by control
3587   // flow and the no-overflow bits may not be valid for the expression in any
3588   // context. This can be fixed similarly to how these flags are handled for
3589   // adds.
3590   SCEV::NoWrapFlags OffsetWrap =
3591       GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
3592
3593   Type *CurTy = GEP->getType();
3594   bool FirstIter = true;
3595   SmallVector<const SCEV *, 4> Offsets;
3596   for (const SCEV *IndexExpr : IndexExprs) {
3597     // Compute the (potentially symbolic) offset in bytes for this index.
3598     if (StructType *STy = dyn_cast<StructType>(CurTy)) {
3599       // For a struct, add the member offset.
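      // For example, indexing field 1 of { i32, i64 } contributes a constant
      // offset of 8 bytes under a typical 64-bit data layout.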
3600       ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
3601       unsigned FieldNo = Index->getZExtValue();
3602       const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo);
3603       Offsets.push_back(FieldOffset);
3604
3605       // Update CurTy to the type of the field at Index.
3606       CurTy = STy->getTypeAtIndex(Index);
3607     } else {
3608       // Update CurTy to its element type.
3609       if (FirstIter) {
3610         assert(isa<PointerType>(CurTy) &&
3611                "The first index of a GEP indexes a pointer");
3612         CurTy = GEP->getSourceElementType();
3613         FirstIter = false;
3614       } else {
3615         CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0);
3616       }
3617       // For an array, add the element offset, explicitly scaled.
3618       const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
3619       // Getelementptr indices are signed.
3620       IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy);
3621
3622       // Multiply the index by the element size to compute the element offset.
3623       const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3624       Offsets.push_back(LocalOffset);
3625     }
3626   }
3627
3628   // Handle the degenerate case of a GEP without offsets.
3629   if (Offsets.empty())
3630     return BaseExpr;
3631
3632   // Add the offsets together, assuming nsw if inbounds.
3633   const SCEV *Offset = getAddExpr(Offsets, OffsetWrap);
3634   // Add the base address and the offset. We cannot use the nsw flag, as the
3635   // base address is unsigned. However, if we know that the offset is
3636   // non-negative, we can use nuw.
3637   SCEV::NoWrapFlags BaseWrap = GEP->isInBounds() && isKnownNonNegative(Offset)
3638                                    ? SCEV::FlagNUW : SCEV::FlagAnyWrap;
3639   return getAddExpr(BaseExpr, Offset, BaseWrap);
3640 }
3641
3642 std::tuple<SCEV *, FoldingSetNodeID, void *>
3643 ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
3644                                          ArrayRef<const SCEV *> Ops) {
3645   FoldingSetNodeID ID;
3646   void *IP = nullptr;
3647   ID.AddInteger(SCEVType);
3648   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3649     ID.AddPointer(Ops[i]);
3650   return std::tuple<SCEV *, FoldingSetNodeID, void *>(
3651       UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP);
3652 }
3653
3654 const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) {
3655   SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
3656   return getSMaxExpr(Op, getNegativeSCEV(Op, Flags));
3657 }
3658
3659 const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
3660                                            SmallVectorImpl<const SCEV *> &Ops) {
3661   assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
3662   if (Ops.size() == 1) return Ops[0];
3663 #ifndef NDEBUG
3664   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
3665   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
3666     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
3667            "Operand types don't match!");
3668 #endif
3669
3670   bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr;
3671   bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr;
3672
3673   // Sort by complexity; this groups all similar expression types together.
3674   GroupByComplexity(Ops, &LI, DT);
3675
3676   // Check if we have created the same expression before.
3677   if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) {
3678     return S;
3679   }
3680
3681   // If there are any constants, fold them together.
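  // For example, smax(3, 5, %x) folds its two leading constants below to
  // give smax(5, %x).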
3682 unsigned Idx = 0; 3683 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3684 ++Idx; 3685 assert(Idx < Ops.size()); 3686 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { 3687 if (Kind == scSMaxExpr) 3688 return APIntOps::smax(LHS, RHS); 3689 else if (Kind == scSMinExpr) 3690 return APIntOps::smin(LHS, RHS); 3691 else if (Kind == scUMaxExpr) 3692 return APIntOps::umax(LHS, RHS); 3693 else if (Kind == scUMinExpr) 3694 return APIntOps::umin(LHS, RHS); 3695 llvm_unreachable("Unknown SCEV min/max opcode"); 3696 }; 3697 3698 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3699 // We found two constants, fold them together! 3700 ConstantInt *Fold = ConstantInt::get( 3701 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); 3702 Ops[0] = getConstant(Fold); 3703 Ops.erase(Ops.begin()+1); // Erase the folded element 3704 if (Ops.size() == 1) return Ops[0]; 3705 LHSC = cast<SCEVConstant>(Ops[0]); 3706 } 3707 3708 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); 3709 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); 3710 3711 if (IsMax ? IsMinV : IsMaxV) { 3712 // If we are left with a constant minimum(/maximum)-int, strip it off. 3713 Ops.erase(Ops.begin()); 3714 --Idx; 3715 } else if (IsMax ? IsMaxV : IsMinV) { 3716 // If we have a max(/min) with a constant maximum(/minimum)-int, 3717 // it will always be the extremum. 3718 return LHSC; 3719 } 3720 3721 if (Ops.size() == 1) return Ops[0]; 3722 } 3723 3724 // Find the first operation of the same kind 3725 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) 3726 ++Idx; 3727 3728 // Check to see if one of the operands is of the same kind. If so, expand its 3729 // operands onto our operand list, and recurse to simplify. 3730 if (Idx < Ops.size()) { 3731 bool DeletedAny = false; 3732 while (Ops[Idx]->getSCEVType() == Kind) { 3733 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]); 3734 Ops.erase(Ops.begin()+Idx); 3735 Ops.append(SMME->op_begin(), SMME->op_end()); 3736 DeletedAny = true; 3737 } 3738 3739 if (DeletedAny) 3740 return getMinMaxExpr(Kind, Ops); 3741 } 3742 3743 // Okay, check to see if the same value occurs in the operand list twice. If 3744 // so, delete one. Since we sorted the list, these values are required to 3745 // be adjacent. 3746 llvm::CmpInst::Predicate GEPred = 3747 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; 3748 llvm::CmpInst::Predicate LEPred = 3749 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 3750 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred; 3751 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred; 3752 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) { 3753 if (Ops[i] == Ops[i + 1] || 3754 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) { 3755 // X op Y op Y --> X op Y 3756 // X op Y --> X, if we know X, Y are ordered appropriately 3757 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); 3758 --i; 3759 --e; 3760 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i], 3761 Ops[i + 1])) { 3762 // X op Y --> Y, if we know X, Y are ordered appropriately 3763 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); 3764 --i; 3765 --e; 3766 } 3767 } 3768 3769 if (Ops.size() == 1) return Ops[0]; 3770 3771 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3772 3773 // Okay, it looks like we really DO need an expr. Check to see if we 3774 // already have one, otherwise create a new one. 
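  // (The operand list may have been sorted, deduplicated, or otherwise
  // rewritten above, so probe the cache again with the final operands.)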
3775   const SCEV *ExistingSCEV;
3776   FoldingSetNodeID ID;
3777   void *IP;
3778   std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops);
3779   if (ExistingSCEV)
3780     return ExistingSCEV;
3781   const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3782   std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3783   SCEV *S = new (SCEVAllocator)
3784       SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
3785
3786   UniqueSCEVs.InsertNode(S, IP);
3787   addToLoopUseLists(S);
3788   return S;
3789 }
3790
3791 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3792   SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3793   return getSMaxExpr(Ops);
3794 }
3795
3796 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3797   return getMinMaxExpr(scSMaxExpr, Ops);
3798 }
3799
3800 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3801   SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3802   return getUMaxExpr(Ops);
3803 }
3804
3805 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3806   return getMinMaxExpr(scUMaxExpr, Ops);
3807 }
3808
3809 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
3810                                          const SCEV *RHS) {
3811   SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3812   return getSMinExpr(Ops);
3813 }
3814
3815 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3816   return getMinMaxExpr(scSMinExpr, Ops);
3817 }
3818
3819 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
3820                                          const SCEV *RHS) {
3821   SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3822   return getUMinExpr(Ops);
3823 }
3824
3825 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3826   return getMinMaxExpr(scUMinExpr, Ops);
3827 }
3828
3829 const SCEV *
3830 ScalarEvolution::getSizeOfScalableVectorExpr(Type *IntTy,
3831                                              ScalableVectorType *ScalableTy) {
3832   Constant *NullPtr = Constant::getNullValue(ScalableTy->getPointerTo());
3833   Constant *One = ConstantInt::get(IntTy, 1);
3834   Constant *GEP = ConstantExpr::getGetElementPtr(ScalableTy, NullPtr, One);
3835   // Note that the expression we created is the final expression; we don't
3836   // want to simplify it any further. Also, if we call a normal getSCEV(),
3837   // we'll end up in an endless recursion. So just create an SCEVUnknown.
3838   return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy));
3839 }
3840
3841 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
3842   if (auto *ScalableAllocTy = dyn_cast<ScalableVectorType>(AllocTy))
3843     return getSizeOfScalableVectorExpr(IntTy, ScalableAllocTy);
3844   // We can bypass creating a target-independent constant expression and then
3845   // folding it back into a ConstantInt. This is just a compile-time
3846   // optimization.
3847   return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
3848 }
3849
3850 const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) {
3851   if (auto *ScalableStoreTy = dyn_cast<ScalableVectorType>(StoreTy))
3852     return getSizeOfScalableVectorExpr(IntTy, ScalableStoreTy);
3853   // We can bypass creating a target-independent constant expression and then
3854   // folding it back into a ConstantInt. This is just a compile-time
3855   // optimization.
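  // For example, getStoreSizeOfExpr(i64, <4 x i32>) yields the constant 16.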
3856   return getConstant(IntTy, getDataLayout().getTypeStoreSize(StoreTy));
3857 }
3858
3859 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
3860                                              StructType *STy,
3861                                              unsigned FieldNo) {
3862   // We can bypass creating a target-independent constant expression and then
3863   // folding it back into a ConstantInt. This is just a compile-time
3864   // optimization.
3865   return getConstant(
3866       IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
3867 }
3868
3869 const SCEV *ScalarEvolution::getUnknown(Value *V) {
3870   // Don't attempt to do anything other than create a SCEVUnknown object
3871   // here. createSCEV only calls getUnknown after checking for all other
3872   // interesting possibilities, and any other code that calls getUnknown
3873   // is doing so in order to hide a value from SCEV canonicalization.
3874
3875   FoldingSetNodeID ID;
3876   ID.AddInteger(scUnknown);
3877   ID.AddPointer(V);
3878   void *IP = nullptr;
3879   if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
3880     assert(cast<SCEVUnknown>(S)->getValue() == V &&
3881            "Stale SCEVUnknown in uniquing map!");
3882     return S;
3883   }
3884   SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
3885                                             FirstUnknown);
3886   FirstUnknown = cast<SCEVUnknown>(S);
3887   UniqueSCEVs.InsertNode(S, IP);
3888   return S;
3889 }
3890
3891 //===----------------------------------------------------------------------===//
3892 // Basic SCEV Analysis and PHI Idiom Recognition Code
3893 //
3894
3895 /// Test if values of the given type are analyzable within the SCEV
3896 /// framework. This primarily includes integer types, and it can optionally
3897 /// include pointer types if the ScalarEvolution class has access to
3898 /// target-specific information.
3899 bool ScalarEvolution::isSCEVable(Type *Ty) const {
3900   // Integers and pointers are always SCEVable.
3901   return Ty->isIntOrPtrTy();
3902 }
3903
3904 /// Return the size in bits of the specified type, for which isSCEVable must
3905 /// return true.
3906 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
3907   assert(isSCEVable(Ty) && "Type is not SCEVable!");
3908   if (Ty->isPointerTy())
3909     return getDataLayout().getIndexTypeSizeInBits(Ty);
3910   return getDataLayout().getTypeSizeInBits(Ty);
3911 }
3912
3913 /// Return a type with the same bitwidth as the given type and which represents
3914 /// how SCEV will treat the given type, for which isSCEVable must return
3915 /// true. For pointer types, this is the pointer index sized integer type.
3916 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
3917   assert(isSCEVable(Ty) && "Type is not SCEVable!");
3918
3919   if (Ty->isIntegerTy())
3920     return Ty;
3921
3922   // The only other supported type is pointer.
3923   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
3924   return getDataLayout().getIndexType(Ty);
3925 }
3926
3927 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
3928   return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ?
T1 : T2; 3929 } 3930 3931 const SCEV *ScalarEvolution::getCouldNotCompute() { 3932 return CouldNotCompute.get(); 3933 } 3934 3935 bool ScalarEvolution::checkValidity(const SCEV *S) const { 3936 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) { 3937 auto *SU = dyn_cast<SCEVUnknown>(S); 3938 return SU && SU->getValue() == nullptr; 3939 }); 3940 3941 return !ContainsNulls; 3942 } 3943 3944 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { 3945 HasRecMapType::iterator I = HasRecMap.find(S); 3946 if (I != HasRecMap.end()) 3947 return I->second; 3948 3949 bool FoundAddRec = 3950 SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); }); 3951 HasRecMap.insert({S, FoundAddRec}); 3952 return FoundAddRec; 3953 } 3954 3955 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}. 3956 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an 3957 /// offset I, then return {S', I}, else return {\p S, nullptr}. 3958 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) { 3959 const auto *Add = dyn_cast<SCEVAddExpr>(S); 3960 if (!Add) 3961 return {S, nullptr}; 3962 3963 if (Add->getNumOperands() != 2) 3964 return {S, nullptr}; 3965 3966 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0)); 3967 if (!ConstOp) 3968 return {S, nullptr}; 3969 3970 return {Add->getOperand(1), ConstOp->getValue()}; 3971 } 3972 3973 /// Return the ValueOffsetPair set for \p S. \p S can be represented 3974 /// by the value and offset from any ValueOffsetPair in the set. 3975 ScalarEvolution::ValueOffsetPairSetVector * 3976 ScalarEvolution::getSCEVValues(const SCEV *S) { 3977 ExprValueMapType::iterator SI = ExprValueMap.find_as(S); 3978 if (SI == ExprValueMap.end()) 3979 return nullptr; 3980 #ifndef NDEBUG 3981 if (VerifySCEVMap) { 3982 // Check there is no dangling Value in the set returned. 3983 for (const auto &VE : SI->second) 3984 assert(ValueExprMap.count(VE.first)); 3985 } 3986 #endif 3987 return &SI->second; 3988 } 3989 3990 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V) 3991 /// cannot be used separately. eraseValueFromMap should be used to remove 3992 /// V from ValueExprMap and ExprValueMap at the same time. 3993 void ScalarEvolution::eraseValueFromMap(Value *V) { 3994 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3995 if (I != ValueExprMap.end()) { 3996 const SCEV *S = I->second; 3997 // Remove {V, 0} from the set of ExprValueMap[S] 3998 if (auto *SV = getSCEVValues(S)) 3999 SV->remove({V, nullptr}); 4000 4001 // Remove {V, Offset} from the set of ExprValueMap[Stripped] 4002 const SCEV *Stripped; 4003 ConstantInt *Offset; 4004 std::tie(Stripped, Offset) = splitAddExpr(S); 4005 if (Offset != nullptr) { 4006 if (auto *SV = getSCEVValues(Stripped)) 4007 SV->remove({V, Offset}); 4008 } 4009 ValueExprMap.erase(V); 4010 } 4011 } 4012 4013 /// Check whether value has nuw/nsw/exact set but SCEV does not. 4014 /// TODO: In reality it is better to check the poison recursively 4015 /// but this is better than nothing. 
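/// For example, "%a = add nsw i32 %x, %y" may map to a SCEVAddExpr that
/// carries no NSW flag; in that case the IR value is the stronger form.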
4016 static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
4017   if (auto *I = dyn_cast<Instruction>(V)) {
4018     if (isa<OverflowingBinaryOperator>(I)) {
4019       if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
4020         if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
4021           return true;
4022         if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
4023           return true;
4024       }
4025     } else if (isa<PossiblyExactOperator>(I) && I->isExact())
4026       return true;
4027   }
4028   return false;
4029 }
4030
4031 /// Return an existing SCEV if it exists, otherwise analyze the expression and
4032 /// create a new one.
4033 const SCEV *ScalarEvolution::getSCEV(Value *V) {
4034   assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
4035
4036   const SCEV *S = getExistingSCEV(V);
4037   if (S == nullptr) {
4038     S = createSCEV(V);
4039     // During PHI resolution, it is possible to create two SCEVs for the same
4040     // V, so we need to double-check whether V->S was inserted into
4041     // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
4042     std::pair<ValueExprMapType::iterator, bool> Pair =
4043         ValueExprMap.insert({SCEVCallbackVH(V, this), S});
4044     if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
4045       ExprValueMap[S].insert({V, nullptr});
4046
4047       // If S == Stripped + Offset, add Stripped -> {V, Offset} into
4048       // ExprValueMap.
4049       const SCEV *Stripped = S;
4050       ConstantInt *Offset = nullptr;
4051       std::tie(Stripped, Offset) = splitAddExpr(S);
4052       // If Stripped is a SCEVUnknown, don't bother to save
4053       // Stripped -> {V, offset}. It doesn't simplify and sometimes even
4054       // increases the complexity of the expansion code.
4055       // If V is a GetElementPtrInst, don't save Stripped -> {V, offset}
4056       // because it may generate add/sub instead of GEP in SCEV expansion.
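      // For example, if S == (4 + %x), we record %x -> {V, 4} so that the
      // expander can rematerialize %x as (V - 4).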
4057 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && 4058 !isa<GetElementPtrInst>(V)) 4059 ExprValueMap[Stripped].insert({V, Offset}); 4060 } 4061 } 4062 return S; 4063 } 4064 4065 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 4066 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 4067 4068 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 4069 if (I != ValueExprMap.end()) { 4070 const SCEV *S = I->second; 4071 if (checkValidity(S)) 4072 return S; 4073 eraseValueFromMap(V); 4074 forgetMemoizedResults(S); 4075 } 4076 return nullptr; 4077 } 4078 4079 /// Return a SCEV corresponding to -V = -1*V 4080 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 4081 SCEV::NoWrapFlags Flags) { 4082 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 4083 return getConstant( 4084 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 4085 4086 Type *Ty = V->getType(); 4087 Ty = getEffectiveSCEVType(Ty); 4088 return getMulExpr(V, getMinusOne(Ty), Flags); 4089 } 4090 4091 /// If Expr computes ~A, return A else return nullptr 4092 static const SCEV *MatchNotExpr(const SCEV *Expr) { 4093 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 4094 if (!Add || Add->getNumOperands() != 2 || 4095 !Add->getOperand(0)->isAllOnesValue()) 4096 return nullptr; 4097 4098 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 4099 if (!AddRHS || AddRHS->getNumOperands() != 2 || 4100 !AddRHS->getOperand(0)->isAllOnesValue()) 4101 return nullptr; 4102 4103 return AddRHS->getOperand(1); 4104 } 4105 4106 /// Return a SCEV corresponding to ~V = -1-V 4107 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 4108 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 4109 return getConstant( 4110 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 4111 4112 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) 4113 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) { 4114 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { 4115 SmallVector<const SCEV *, 2> MatchedOperands; 4116 for (const SCEV *Operand : MME->operands()) { 4117 const SCEV *Matched = MatchNotExpr(Operand); 4118 if (!Matched) 4119 return (const SCEV *)nullptr; 4120 MatchedOperands.push_back(Matched); 4121 } 4122 return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()), 4123 MatchedOperands); 4124 }; 4125 if (const SCEV *Replaced = MatchMinMaxNegation(MME)) 4126 return Replaced; 4127 } 4128 4129 Type *Ty = V->getType(); 4130 Ty = getEffectiveSCEVType(Ty); 4131 return getMinusSCEV(getMinusOne(Ty), V); 4132 } 4133 4134 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 4135 SCEV::NoWrapFlags Flags, 4136 unsigned Depth) { 4137 // Fast path: X - X --> 0. 4138 if (LHS == RHS) 4139 return getZero(LHS->getType()); 4140 4141 // If we subtract two pointers with different pointer bases, bail. 4142 // Eventually, we're going to add an assertion to getMulExpr that we 4143 // can't multiply by a pointer. 4144 if (RHS->getType()->isPointerTy()) { 4145 if (!LHS->getType()->isPointerTy() || 4146 getPointerBase(LHS) != getPointerBase(RHS)) 4147 return getCouldNotCompute(); 4148 } 4149 4150 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 4151 // makes it so that we cannot make much use of NUW. 4152 auto AddFlags = SCEV::FlagAnyWrap; 4153 const bool RHSIsNotMinSigned = 4154 !getSignedRangeMin(RHS).isMinSignedValue(); 4155 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { 4156 // Let M be the minimum representable signed value. 
Then (-1)*RHS
4157   // signed-wraps if and only if RHS is M. That can happen even for
4158   // a NSW subtraction because e.g. (-1)*M signed-wraps even though
4159   // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
4160   // (-1)*RHS, we need to prove that RHS != M.
4161   //
4162   // If LHS is non-negative and we know that LHS - RHS does not
4163   // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
4164   // either by proving that RHS > M or that LHS >= 0.
4165     if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
4166       AddFlags = SCEV::FlagNSW;
4167     }
4168   }
4169
4170   // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
4171   // RHS is NSW and LHS >= 0.
4172   //
4173   // The difficulty here is that the NSW flag may have been proven
4174   // relative to a loop that is to be found in a recurrence in LHS and
4175   // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
4176   // larger scope than intended.
4177   auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
4178
4179   return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
4180 }
4181
4182 const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
4183                                                      unsigned Depth) {
4184   Type *SrcTy = V->getType();
4185   assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4186          "Cannot truncate or zero extend with non-integer arguments!");
4187   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4188     return V;  // No conversion
4189   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4190     return getTruncateExpr(V, Ty, Depth);
4191   return getZeroExtendExpr(V, Ty, Depth);
4192 }
4193
4194 const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
4195                                                      unsigned Depth) {
4196   Type *SrcTy = V->getType();
4197   assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4198          "Cannot truncate or sign extend with non-integer arguments!");
4199   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4200     return V;  // No conversion
4201   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4202     return getTruncateExpr(V, Ty, Depth);
4203   return getSignExtendExpr(V, Ty, Depth);
4204 }
4205
4206 const SCEV *
4207 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
4208   Type *SrcTy = V->getType();
4209   assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4210          "Cannot noop or zero extend with non-integer arguments!");
4211   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4212          "getNoopOrZeroExtend cannot truncate!");
4213   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4214     return V;  // No conversion
4215   return getZeroExtendExpr(V, Ty);
4216 }
4217
4218 const SCEV *
4219 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
4220   Type *SrcTy = V->getType();
4221   assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4222          "Cannot noop or sign extend with non-integer arguments!");
4223   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4224          "getNoopOrSignExtend cannot truncate!");
4225   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4226     return V;  // No conversion
4227   return getSignExtendExpr(V, Ty);
4228 }
4229
4230 const SCEV *
4231 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
4232   Type *SrcTy = V->getType();
4233   assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4234          "Cannot noop or any extend with non-integer arguments!");
4235   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4236          "getNoopOrAnyExtend cannot truncate!");
4237   if (getTypeSizeInBits(SrcTy) ==
getTypeSizeInBits(Ty)) 4238 return V; // No conversion 4239 return getAnyExtendExpr(V, Ty); 4240 } 4241 4242 const SCEV * 4243 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) { 4244 Type *SrcTy = V->getType(); 4245 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 4246 "Cannot truncate or noop with non-integer arguments!"); 4247 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 4248 "getTruncateOrNoop cannot extend!"); 4249 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4250 return V; // No conversion 4251 return getTruncateExpr(V, Ty); 4252 } 4253 4254 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, 4255 const SCEV *RHS) { 4256 const SCEV *PromotedLHS = LHS; 4257 const SCEV *PromotedRHS = RHS; 4258 4259 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 4260 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 4261 else 4262 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 4263 4264 return getUMaxExpr(PromotedLHS, PromotedRHS); 4265 } 4266 4267 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 4268 const SCEV *RHS) { 4269 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 4270 return getUMinFromMismatchedTypes(Ops); 4271 } 4272 4273 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes( 4274 SmallVectorImpl<const SCEV *> &Ops) { 4275 assert(!Ops.empty() && "At least one operand must be!"); 4276 // Trivial case. 4277 if (Ops.size() == 1) 4278 return Ops[0]; 4279 4280 // Find the max type first. 4281 Type *MaxType = nullptr; 4282 for (auto *S : Ops) 4283 if (MaxType) 4284 MaxType = getWiderType(MaxType, S->getType()); 4285 else 4286 MaxType = S->getType(); 4287 assert(MaxType && "Failed to find maximum type!"); 4288 4289 // Extend all ops to max type. 4290 SmallVector<const SCEV *, 2> PromotedOps; 4291 for (auto *S : Ops) 4292 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType)); 4293 4294 // Generate umin. 4295 return getUMinExpr(PromotedOps); 4296 } 4297 4298 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { 4299 // A pointer operand may evaluate to a nonpointer expression, such as null. 4300 if (!V->getType()->isPointerTy()) 4301 return V; 4302 4303 while (true) { 4304 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 4305 V = AddRec->getStart(); 4306 } else if (auto *Add = dyn_cast<SCEVAddExpr>(V)) { 4307 const SCEV *PtrOp = nullptr; 4308 for (const SCEV *AddOp : Add->operands()) { 4309 if (AddOp->getType()->isPointerTy()) { 4310 // Cannot find the base of an expression with multiple pointer ops. 4311 if (PtrOp) 4312 return V; 4313 PtrOp = AddOp; 4314 } 4315 } 4316 if (!PtrOp) // All operands were non-pointer. 4317 return V; 4318 V = PtrOp; 4319 } else // Not something we can look further into. 4320 return V; 4321 } 4322 } 4323 4324 /// Push users of the given Instruction onto the given Worklist. 4325 static void 4326 PushDefUseChildren(Instruction *I, 4327 SmallVectorImpl<Instruction *> &Worklist) { 4328 // Push the def-use children onto the Worklist stack. 
4329   for (User *U : I->users())
4330     Worklist.push_back(cast<Instruction>(U));
4331 }
4332
4333 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
4334   SmallVector<Instruction *, 16> Worklist;
4335   PushDefUseChildren(PN, Worklist);
4336
4337   SmallPtrSet<Instruction *, 8> Visited;
4338   Visited.insert(PN);
4339   while (!Worklist.empty()) {
4340     Instruction *I = Worklist.pop_back_val();
4341     if (!Visited.insert(I).second)
4342       continue;
4343
4344     auto It = ValueExprMap.find_as(static_cast<Value *>(I));
4345     if (It != ValueExprMap.end()) {
4346       const SCEV *Old = It->second;
4347
4348       // Short-circuit the def-use traversal if the symbolic name
4349       // ceases to appear in expressions.
4350       if (Old != SymName && !hasOperand(Old, SymName))
4351         continue;
4352
4353       // SCEVUnknown for a PHI either means that it has an unrecognized
4354       // structure, it's a PHI that's in the process of being computed
4355       // by createNodeForPHI, or it's a single-value PHI. In the first case,
4356       // additional loop trip count information isn't going to change anything.
4357       // In the second case, createNodeForPHI will perform the necessary
4358       // updates on its own when it gets to that point. In the third, we do
4359       // want to forget the SCEVUnknown.
4360       if (!isa<PHINode>(I) ||
4361           !isa<SCEVUnknown>(Old) ||
4362           (I != PN && Old == SymName)) {
4363         eraseValueFromMap(It->first);
4364         forgetMemoizedResults(Old);
4365       }
4366     }
4367
4368     PushDefUseChildren(I, Worklist);
4369   }
4370 }
4371
4372 namespace {
4373
4374 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
4375 /// expression if its loop is L. If the loop is not L, then if
4376 /// IgnoreOtherLoops is true, use the AddRec itself; otherwise the rewrite
4377 /// cannot be done.
4378 /// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be done.
4379 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
4380 public:
4381   static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
4382                              bool IgnoreOtherLoops = true) {
4383     SCEVInitRewriter Rewriter(L, SE);
4384     const SCEV *Result = Rewriter.visit(S);
4385     if (Rewriter.hasSeenLoopVariantSCEVUnknown())
4386       return SE.getCouldNotCompute();
4387     return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4388                ? SE.getCouldNotCompute()
4389                : Result;
4390   }
4391
4392   const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4393     if (!SE.isLoopInvariant(Expr, L))
4394       SeenLoopVariantSCEVUnknown = true;
4395     return Expr;
4396   }
4397
4398   const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4399     // Only re-write AddRecExprs for this loop.
4400     if (Expr->getLoop() == L)
4401       return Expr->getStart();
4402     SeenOtherLoops = true;
4403     return Expr;
4404   }
4405
4406   bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
4407
4408   bool hasSeenOtherLoops() { return SeenOtherLoops; }
4409
4410 private:
4411   explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
4412       : SCEVRewriteVisitor(SE), L(L) {}
4413
4414   const Loop *L;
4415   bool SeenLoopVariantSCEVUnknown = false;
4416   bool SeenOtherLoops = false;
4417 };
4418
4419 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its post
4420 /// increment expression if its loop is L; otherwise use the AddRec itself.
4421 /// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be
4422 /// done.
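/// For example, rewriting {0,+,1}<L> for L yields its post-increment form
/// {1,+,1}<L>.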
4423 class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> { 4424 public: 4425 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) { 4426 SCEVPostIncRewriter Rewriter(L, SE); 4427 const SCEV *Result = Rewriter.visit(S); 4428 return Rewriter.hasSeenLoopVariantSCEVUnknown() 4429 ? SE.getCouldNotCompute() 4430 : Result; 4431 } 4432 4433 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4434 if (!SE.isLoopInvariant(Expr, L)) 4435 SeenLoopVariantSCEVUnknown = true; 4436 return Expr; 4437 } 4438 4439 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4440 // Only re-write AddRecExprs for this loop. 4441 if (Expr->getLoop() == L) 4442 return Expr->getPostIncExpr(SE); 4443 SeenOtherLoops = true; 4444 return Expr; 4445 } 4446 4447 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4448 4449 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4450 4451 private: 4452 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) 4453 : SCEVRewriteVisitor(SE), L(L) {} 4454 4455 const Loop *L; 4456 bool SeenLoopVariantSCEVUnknown = false; 4457 bool SeenOtherLoops = false; 4458 }; 4459 4460 /// This class evaluates the compare condition by matching it against the 4461 /// condition of loop latch. If there is a match we assume a true value 4462 /// for the condition while building SCEV nodes. 4463 class SCEVBackedgeConditionFolder 4464 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { 4465 public: 4466 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4467 ScalarEvolution &SE) { 4468 bool IsPosBECond = false; 4469 Value *BECond = nullptr; 4470 if (BasicBlock *Latch = L->getLoopLatch()) { 4471 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); 4472 if (BI && BI->isConditional()) { 4473 assert(BI->getSuccessor(0) != BI->getSuccessor(1) && 4474 "Both outgoing branches should not target same header!"); 4475 BECond = BI->getCondition(); 4476 IsPosBECond = BI->getSuccessor(0) == L->getHeader(); 4477 } else { 4478 return S; 4479 } 4480 } 4481 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); 4482 return Rewriter.visit(S); 4483 } 4484 4485 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4486 const SCEV *Result = Expr; 4487 bool InvariantF = SE.isLoopInvariant(Expr, L); 4488 4489 if (!InvariantF) { 4490 Instruction *I = cast<Instruction>(Expr->getValue()); 4491 switch (I->getOpcode()) { 4492 case Instruction::Select: { 4493 SelectInst *SI = cast<SelectInst>(I); 4494 Optional<const SCEV *> Res = 4495 compareWithBackedgeCondition(SI->getCondition()); 4496 if (Res.hasValue()) { 4497 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne(); 4498 Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue()); 4499 } 4500 break; 4501 } 4502 default: { 4503 Optional<const SCEV *> Res = compareWithBackedgeCondition(I); 4504 if (Res.hasValue()) 4505 Result = Res.getValue(); 4506 break; 4507 } 4508 } 4509 } 4510 return Result; 4511 } 4512 4513 private: 4514 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, 4515 bool IsPosBECond, ScalarEvolution &SE) 4516 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), 4517 IsPositiveBECond(IsPosBECond) {} 4518 4519 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC); 4520 4521 const Loop *L; 4522 /// Loop back condition. 4523 Value *BackedgeCond = nullptr; 4524 /// Set to true if loop back is on positive branch condition. 
  bool IsPositiveBECond;
};

Optional<const SCEV *>
SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {

  // If the value matches the backedge condition for the loop latch,
  // then return a constant evolution node based on the loopback
  // branch taken.
  if (BackedgeCond == IC)
    return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
                            : SE.getZero(Type::getInt1Ty(SE.getContext()));
  return None;
}

class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Only allow AddRecExprs for this loop.
    if (!SE.isLoopInvariant(Expr, L))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool Valid = true;
};

} // end anonymous namespace

SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  using OBO = OverflowingBinaryOperator;

  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}

SCEV::NoWrapFlags
ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
  SCEV::NoWrapFlags Result = AR->getNoWrapFlags();

  if (AR->hasNoSignedWrap())
    return Result;

  if (!AR->isAffine())
    return Result;

  const SCEV *Step = AR->getStepRecurrence(*this);
  const Loop *L = AR->getLoop();

  // Check whether the backedge-taken count is SCEVCouldNotCompute.
  // Note that this serves two purposes: It filters out loops that are
  // simply not analyzable, and it covers the case where this code is
  // being called from within backedge-taken count analysis, such that
  // attempting to ask for the backedge-taken count would likely result
  // in infinite recursion. In the latter case, the analysis code will
  // cope with a conservative value, and it will take care to purge
  // that value once it has finished.
  const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);

  // Normally, in the cases we can prove no-overflow via a
  // backedge guarding condition, we can also compute a backedge
  // taken count for the loop. The exceptions are assumptions and
  // guards present in the loop -- SCEV is not great at exploiting
  // these to compute max backedge taken counts, but can still use
  // these to prove lack of overflow. Use this fact to avoid
  // doing extra work that may not pay off.

  if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
      AC.assumptions().empty())
    return Result;

  // If the backedge is guarded by a comparison with the pre-inc value the
  // addrec is safe. Also, if the entry is guarded by a comparison with the
  // start value and the backedge is guarded by a comparison with the post-inc
  // value, the addrec is safe.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      getSignedOverflowLimitForStep(Step, &Pred, this);
  if (OverflowLimit &&
      (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
       isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
    Result = setFlags(Result, SCEV::FlagNSW);
  }
  return Result;
}

SCEV::NoWrapFlags
ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
  SCEV::NoWrapFlags Result = AR->getNoWrapFlags();

  if (AR->hasNoUnsignedWrap())
    return Result;

  if (!AR->isAffine())
    return Result;

  const SCEV *Step = AR->getStepRecurrence(*this);
  unsigned BitWidth = getTypeSizeInBits(AR->getType());
  const Loop *L = AR->getLoop();

  // Check whether the backedge-taken count is SCEVCouldNotCompute.
  // Note that this serves two purposes: It filters out loops that are
  // simply not analyzable, and it covers the case where this code is
  // being called from within backedge-taken count analysis, such that
  // attempting to ask for the backedge-taken count would likely result
  // in infinite recursion. In the latter case, the analysis code will
  // cope with a conservative value, and it will take care to purge
  // that value once it has finished.
  const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);

  // Normally, in the cases we can prove no-overflow via a
  // backedge guarding condition, we can also compute a backedge
  // taken count for the loop. The exceptions are assumptions and
  // guards present in the loop -- SCEV is not great at exploiting
  // these to compute max backedge taken counts, but can still use
  // these to prove lack of overflow. Use this fact to avoid
  // doing extra work that may not pay off.

  if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
      AC.assumptions().empty())
    return Result;

  // If the backedge is guarded by a comparison with the pre-inc value the
  // addrec is safe. Also, if the entry is guarded by a comparison with the
  // start value and the backedge is guarded by a comparison with the post-inc
  // value, the addrec is safe.
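  // For instance (an illustrative sketch, not from the original source): in
  // i8 with a step whose unsigned max is 3, N below is 0 - 3 == 253. If the
  // backedge guarantees AR <u 253 on every iteration, then AR + Step stays
  // below 2^8 and cannot wrap.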
  if (isKnownPositive(Step)) {
    const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                getUnsignedRangeMax(Step));
    if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
        isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
      Result = setFlags(Result, SCEV::FlagNUW);
    }
  }

  return Result;
}

namespace {

/// Represents an abstract binary operation. This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW = false;
  bool IsNUW = false;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op = nullptr;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
};

} // end anonymous namespace

/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction
      // step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
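      // In the well-defined case, the mapping below is, e.g. (illustrative):
      //   lshr i32 %x, 4  ==>  udiv i32 %x, 16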
4779 if (SA->getValue().ult(BitWidth)) { 4780 Constant *X = 4781 ConstantInt::get(SA->getContext(), 4782 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 4783 return BinaryOp(Instruction::UDiv, Op->getOperand(0), X); 4784 } 4785 } 4786 return BinaryOp(Op); 4787 4788 case Instruction::ExtractValue: { 4789 auto *EVI = cast<ExtractValueInst>(Op); 4790 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0) 4791 break; 4792 4793 auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()); 4794 if (!WO) 4795 break; 4796 4797 Instruction::BinaryOps BinOp = WO->getBinaryOp(); 4798 bool Signed = WO->isSigned(); 4799 // TODO: Should add nuw/nsw flags for mul as well. 4800 if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT)) 4801 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS()); 4802 4803 // Now that we know that all uses of the arithmetic-result component of 4804 // CI are guarded by the overflow check, we can go ahead and pretend 4805 // that the arithmetic is non-overflowing. 4806 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(), 4807 /* IsNSW = */ Signed, /* IsNUW = */ !Signed); 4808 } 4809 4810 default: 4811 break; 4812 } 4813 4814 // Recognise intrinsic loop.decrement.reg, and as this has exactly the same 4815 // semantics as a Sub, return a binary sub expression. 4816 if (auto *II = dyn_cast<IntrinsicInst>(V)) 4817 if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg) 4818 return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1)); 4819 4820 return None; 4821 } 4822 4823 /// Helper function to createAddRecFromPHIWithCasts. We have a phi 4824 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via 4825 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the 4826 /// way. This function checks if \p Op, an operand of this SCEVAddExpr, 4827 /// follows one of the following patterns: 4828 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4829 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4830 /// If the SCEV expression of \p Op conforms with one of the expected patterns 4831 /// we return the type of the truncation operation, and indicate whether the 4832 /// truncated type should be treated as signed/unsigned by setting 4833 /// \p Signed to true/false, respectively. 4834 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, 4835 bool &Signed, ScalarEvolution &SE) { 4836 // The case where Op == SymbolicPHI (that is, with no type conversions on 4837 // the way) is handled by the regular add recurrence creating logic and 4838 // would have already been triggered in createAddRecForPHI. Reaching it here 4839 // means that createAddRecFromPHI had failed for this PHI before (e.g., 4840 // because one of the other operands of the SCEVAddExpr updating this PHI is 4841 // not invariant). 4842 // 4843 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in 4844 // this case predicates that allow us to prove that Op == SymbolicPHI will 4845 // be added. 4846 if (Op == SymbolicPHI) 4847 return nullptr; 4848 4849 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType()); 4850 unsigned NewBits = SE.getTypeSizeInBits(Op->getType()); 4851 if (SourceBits != NewBits) 4852 return nullptr; 4853 4854 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op); 4855 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op); 4856 if (!SExt && !ZExt) 4857 return nullptr; 4858 const SCEVTruncateExpr *Trunc = 4859 SExt ? 
dyn_cast<SCEVTruncateExpr>(SExt->getOperand()) 4860 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand()); 4861 if (!Trunc) 4862 return nullptr; 4863 const SCEV *X = Trunc->getOperand(); 4864 if (X != SymbolicPHI) 4865 return nullptr; 4866 Signed = SExt != nullptr; 4867 return Trunc->getType(); 4868 } 4869 4870 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { 4871 if (!PN->getType()->isIntegerTy()) 4872 return nullptr; 4873 const Loop *L = LI.getLoopFor(PN->getParent()); 4874 if (!L || L->getHeader() != PN->getParent()) 4875 return nullptr; 4876 return L; 4877 } 4878 4879 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the 4880 // computation that updates the phi follows the following pattern: 4881 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum 4882 // which correspond to a phi->trunc->sext/zext->add->phi update chain. 4883 // If so, try to see if it can be rewritten as an AddRecExpr under some 4884 // Predicates. If successful, return them as a pair. Also cache the results 4885 // of the analysis. 4886 // 4887 // Example usage scenario: 4888 // Say the Rewriter is called for the following SCEV: 4889 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4890 // where: 4891 // %X = phi i64 (%Start, %BEValue) 4892 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X), 4893 // and call this function with %SymbolicPHI = %X. 4894 // 4895 // The analysis will find that the value coming around the backedge has 4896 // the following SCEV: 4897 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4898 // Upon concluding that this matches the desired pattern, the function 4899 // will return the pair {NewAddRec, SmallPredsVec} where: 4900 // NewAddRec = {%Start,+,%Step} 4901 // SmallPredsVec = {P1, P2, P3} as follows: 4902 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw> 4903 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64) 4904 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64) 4905 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec 4906 // under the predicates {P1,P2,P3}. 4907 // This predicated rewrite will be cached in PredicatedSCEVRewrites: 4908 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3)} 4909 // 4910 // TODO's: 4911 // 4912 // 1) Extend the Induction descriptor to also support inductions that involve 4913 // casts: When needed (namely, when we are called in the context of the 4914 // vectorizer induction analysis), a Set of cast instructions will be 4915 // populated by this method, and provided back to isInductionPHI. This is 4916 // needed to allow the vectorizer to properly record them to be ignored by 4917 // the cost model and to avoid vectorizing them (otherwise these casts, 4918 // which are redundant under the runtime overflow checks, will be 4919 // vectorized, which can be costly). 4920 // 4921 // 2) Support additional induction/PHISCEV patterns: We also want to support 4922 // inductions where the sext-trunc / zext-trunc operations (partly) occur 4923 // after the induction update operation (the induction increment): 4924 // 4925 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix) 4926 // which correspond to a phi->add->trunc->sext/zext->phi update chain. 4927 // 4928 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix) 4929 // which correspond to a phi->trunc->add->sext/zext->phi update chain. 
4930 // 4931 // 3) Outline common code with createAddRecFromPHI to avoid duplication. 4932 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4933 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { 4934 SmallVector<const SCEVPredicate *, 3> Predicates; 4935 4936 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can 4937 // return an AddRec expression under some predicate. 4938 4939 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4940 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4941 assert(L && "Expecting an integer loop header phi"); 4942 4943 // The loop may have multiple entrances or multiple exits; we can analyze 4944 // this phi as an addrec if it has a unique entry value and a unique 4945 // backedge value. 4946 Value *BEValueV = nullptr, *StartValueV = nullptr; 4947 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4948 Value *V = PN->getIncomingValue(i); 4949 if (L->contains(PN->getIncomingBlock(i))) { 4950 if (!BEValueV) { 4951 BEValueV = V; 4952 } else if (BEValueV != V) { 4953 BEValueV = nullptr; 4954 break; 4955 } 4956 } else if (!StartValueV) { 4957 StartValueV = V; 4958 } else if (StartValueV != V) { 4959 StartValueV = nullptr; 4960 break; 4961 } 4962 } 4963 if (!BEValueV || !StartValueV) 4964 return None; 4965 4966 const SCEV *BEValue = getSCEV(BEValueV); 4967 4968 // If the value coming around the backedge is an add with the symbolic 4969 // value we just inserted, possibly with casts that we can ignore under 4970 // an appropriate runtime guard, then we found a simple induction variable! 4971 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 4972 if (!Add) 4973 return None; 4974 4975 // If there is a single occurrence of the symbolic value, possibly 4976 // casted, replace it with a recurrence. 4977 unsigned FoundIndex = Add->getNumOperands(); 4978 Type *TruncTy = nullptr; 4979 bool Signed; 4980 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4981 if ((TruncTy = 4982 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 4983 if (FoundIndex == e) { 4984 FoundIndex = i; 4985 break; 4986 } 4987 4988 if (FoundIndex == Add->getNumOperands()) 4989 return None; 4990 4991 // Create an add with everything but the specified operand. 4992 SmallVector<const SCEV *, 8> Ops; 4993 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4994 if (i != FoundIndex) 4995 Ops.push_back(Add->getOperand(i)); 4996 const SCEV *Accum = getAddExpr(Ops); 4997 4998 // The runtime checks will not be valid if the step amount is 4999 // varying inside the loop. 5000 if (!isLoopInvariant(Accum, L)) 5001 return None; 5002 5003 // *** Part2: Create the predicates 5004 5005 // Analysis was successful: we have a phi-with-cast pattern for which we 5006 // can return an AddRec expression under the following predicates: 5007 // 5008 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 5009 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
5010 // P2: An Equal predicate that guarantees that 5011 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 5012 // P3: An Equal predicate that guarantees that 5013 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 5014 // 5015 // As we next prove, the above predicates guarantee that: 5016 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 5017 // 5018 // 5019 // More formally, we want to prove that: 5020 // Expr(i+1) = Start + (i+1) * Accum 5021 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 5022 // 5023 // Given that: 5024 // 1) Expr(0) = Start 5025 // 2) Expr(1) = Start + Accum 5026 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 5027 // 3) Induction hypothesis (step i): 5028 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 5029 // 5030 // Proof: 5031 // Expr(i+1) = 5032 // = Start + (i+1)*Accum 5033 // = (Start + i*Accum) + Accum 5034 // = Expr(i) + Accum 5035 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 5036 // :: from step i 5037 // 5038 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 5039 // 5040 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 5041 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 5042 // + Accum :: from P3 5043 // 5044 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 5045 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 5046 // 5047 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 5048 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 5049 // 5050 // By induction, the same applies to all iterations 1<=i<n: 5051 // 5052 5053 // Create a truncated addrec for which we will add a no overflow check (P1). 5054 const SCEV *StartVal = getSCEV(StartValueV); 5055 const SCEV *PHISCEV = 5056 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 5057 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 5058 5059 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 5060 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 5061 // will be constant. 5062 // 5063 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 5064 // add P1. 5065 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 5066 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 5067 Signed ? SCEVWrapPredicate::IncrementNSSW 5068 : SCEVWrapPredicate::IncrementNUSW; 5069 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 5070 Predicates.push_back(AddRecPred); 5071 } 5072 5073 // Create the Equal Predicates P2,P3: 5074 5075 // It is possible that the predicates P2 and/or P3 are computable at 5076 // compile time due to StartVal and/or Accum being constants. 5077 // If either one is, then we can check that now and escape if either P2 5078 // or P3 is false. 5079 5080 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 5081 // for each of StartVal and Accum 5082 auto getExtendedExpr = [&](const SCEV *Expr, 5083 bool CreateSignExtend) -> const SCEV * { 5084 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 5085 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 5086 const SCEV *ExtendedExpr = 5087 CreateSignExtend ? 
            getSignExtendExpr(TruncatedExpr, Expr->getType())
                         : getZeroExtendExpr(TruncatedExpr, Expr->getType());
    return ExtendedExpr;
  };

  // Given:
  //   ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
  //                = getExtendedExpr(Expr)
  // Determine whether the predicate P: Expr == ExtendedExpr
  // is known to be false at compile time.
  auto PredIsKnownFalse = [&](const SCEV *Expr,
                              const SCEV *ExtendedExpr) -> bool {
    return Expr != ExtendedExpr &&
           isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
  };

  const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
  if (PredIsKnownFalse(StartVal, StartExtended)) {
    LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
    return None;
  }

  // The Step is always Signed (because the overflow checks are either
  // NSSW or NUSW).
  const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
  if (PredIsKnownFalse(Accum, AccumExtended)) {
    LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
    return None;
  }

  auto AppendPredicate = [&](const SCEV *Expr,
                             const SCEV *ExtendedExpr) -> void {
    if (Expr != ExtendedExpr &&
        !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
      const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
      LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
      Predicates.push_back(Pred);
    }
  };

  AppendPredicate(StartVal, StartExtended);
  AppendPredicate(Accum, AccumExtended);

  // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
  // which the casts had been folded away. The caller can rewrite SymbolicPHI
  // into NewAR if it will also add the runtime overflow checks specified in
  // Predicates.
  auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);

  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
      std::make_pair(NewAR, Predicates);
  // Remember the result of the analysis for this SCEV at this location.
  PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
  return PredRewrite;
}

Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  if (!L)
    return None;

  // Check to see if we already analyzed this PHI.
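  // (A cached entry whose SCEV component is SymbolicPHI itself records a
  // previous failure; a cached AddRec records a previous success. See the
  // checks just below.)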
  auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
  if (I != PredicatedSCEVRewrites.end()) {
    std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
        I->second;
    // Analysis was done before and failed to create an AddRec:
    if (Rewrite.first == SymbolicPHI)
      return None;
    // Analysis was done before and succeeded in creating an AddRec under
    // a predicate:
    assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
    assert(!(Rewrite.second).empty() && "Expected to find Predicates");
    return Rewrite;
  }

  Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
      Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);

  // Record in the cache that the analysis failed.
  if (!Rewrite) {
    SmallVector<const SCEVPredicate *, 3> Predicates;
    PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
    return None;
  }

  return Rewrite;
}

// FIXME: This utility is currently required because the Rewriter currently
// does not rewrite this expression:
// {0, +, (sext ix (trunc iy to ix) to iy)}
// into {0, +, %step},
// even when the following Equal predicate exists:
// "%step == (sext ix (trunc iy to ix) to iy)".
bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
    const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
  if (AR1 == AR2)
    return true;

  auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
    if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
        !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
      return false;
    return true;
  };

  if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
      !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
    return false;
  return true;
}

/// A helper function for createAddRecFromPHI to handle simple cases.
///
/// This function tries to find an AddRec expression for the simplest (yet most
/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
/// If it fails, createAddRecFromPHI will use a more general, but slow,
/// technique for finding the AddRec expression.
const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
                                                      Value *BEValueV,
                                                      Value *StartValueV) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  assert(L && L->getHeader() == PN->getParent());
  assert(BEValueV && StartValueV);

  auto BO = MatchBinaryOp(BEValueV, DT);
  if (!BO)
    return nullptr;

  if (BO->Opcode != Instruction::Add)
    return nullptr;

  const SCEV *Accum = nullptr;
  if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
    Accum = getSCEV(BO->RHS);
  else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
    Accum = getSCEV(BO->LHS);

  if (!Accum)
    return nullptr;

  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BO->IsNUW)
    Flags = setFlags(Flags, SCEV::FlagNUW);
  if (BO->IsNSW)
    Flags = setFlags(Flags, SCEV::FlagNSW);

  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

  ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

  // We can add Flags to the post-inc expression only if we
  // know that it is *undefined behavior* for BEValueV to
  // overflow.
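  // (Illustrative note, not from the original source: for PHISCEV =
  // {%start,+,%step}<nsw>, the call below pre-builds the post-inc form
  // {(%start + %step),+,%step} with the same flags, so they are not lost
  // when that expression is requested later.)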
  if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
    if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
      (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

  return PHISCEV;
}

const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return nullptr;

  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
         "PHI node already processed?");

  // First, try to find an AddRec expression without creating a fictitious
  // symbolic value for PN.
  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
    return S;

  // Handle PHI node value symbolically.
  const SCEV *SymbolicName = getUnknown(PN);
  ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});

  // Using this symbolic name for the PHI, analyze the value coming around
  // the back-edge.
  const SCEV *BEValue = getSCEV(BEValueV);

  // NOTE: If BEValue is loop invariant, we know that the PHI node just
  // has a special value for the first iteration of the loop.

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, then we found a simple induction variable!
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
    // If there is a single occurrence of the symbolic value, replace it
    // with a recurrence.
    unsigned FoundIndex = Add->getNumOperands();
    for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
      if (Add->getOperand(i) == SymbolicName)
        if (FoundIndex == e) {
          FoundIndex = i;
          break;
        }

    if (FoundIndex != Add->getNumOperands()) {
      // Create an add with everything but the specified operand.
      SmallVector<const SCEV *, 8> Ops;
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (i != FoundIndex)
          Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
                                                             L, *this));
      const SCEV *Accum = getAddExpr(Ops);

      // This is not a valid addrec if the step amount is varying each
      // loop iteration, but is not itself an addrec in this loop.
      if (isLoopInvariant(Accum, L) ||
          (isa<SCEVAddRecExpr>(Accum) &&
           cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
        SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

        if (auto BO = MatchBinaryOp(BEValueV, DT)) {
          if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
            if (BO->IsNUW)
              Flags = setFlags(Flags, SCEV::FlagNUW);
            if (BO->IsNSW)
              Flags = setFlags(Flags, SCEV::FlagNSW);
          }
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
          // If the increment is an inbounds GEP, then we know the address
          // space cannot be wrapped around. We cannot make any guarantee
          // about signed or unsigned overflow because pointers are
          // unsigned but we may have a negative index from the base
          // pointer. We can guarantee that no unsigned wrap occurs if the
          // indices form a positive value.
          if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
            Flags = setFlags(Flags, SCEV::FlagNW);

            const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
            if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
              Flags = setFlags(Flags, SCEV::FlagNUW);
          }

          // We cannot transfer nuw and nsw flags from subtraction
          // operations -- sub nuw X, Y is not the same as add nuw X, -Y
          // for instance.
        }

        const SCEV *StartVal = getSCEV(StartValueV);
        const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

        // We can add Flags to the post-inc expression only if we
        // know that it is *undefined behavior* for BEValueV to
        // overflow.
        if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
          if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

        return PHISCEV;
      }
    }
  } else {
    // Otherwise, this could be a loop like this:
    //   i = 0; for (j = 1; ..; ++j) { .... i = j; }
    // In this case, j = {1,+,1} and BEValue is j.
    // Because the other in-value of i (0) fits the evolution of BEValue,
    // i really is an addrec evolution.
    //
    // We can generalize this by saying that i is the shifted value of BEValue
    // by one iteration:
    //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
    if (Shifted != getCouldNotCompute() &&
        Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
  // as it would prevent later (possibly simpler) SCEV expressions from being
  // added to the ValueExprMap.
  eraseValueFromMap(PN);

  return nullptr;
}

// Checks if the SCEV S is available at BB. S is considered available at BB
// if S can be materialized at BB without introducing a fault.
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr; // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
        : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant:
      case scPtrToInt:
      case scTruncate:
      case scZeroExtend:
      case scSignExtend:
      case scAddExpr:
      case scMulExpr:
      case scUMaxExpr:
      case scSMaxExpr:
      case scUMinExpr:
      case scSMinExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences that are on the loop BB is in, or some
        // outer loop. This guarantees availability because the value of the
        // add recurrence at BB is simply the "current" value of the induction
        // variable. We can relax this in the future; for instance an add
        // recurrence on a sibling dominating loop is also available at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
        return setUnavailable();
      }
      llvm_unreachable("Unknown SCEV kind!");
    }

    bool isDone() { return TraversalDone; }
  };

  CheckAvailable CA(L, BB, DT);
  SCEVTraversal<CheckAvailable> ST(CA);

  ST.visitAll(S);
  return CA.Available;
}

// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
// match.
5494 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 5495 Value *&C, Value *&LHS, Value *&RHS) { 5496 C = BI->getCondition(); 5497 5498 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 5499 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 5500 5501 if (!LeftEdge.isSingleEdge()) 5502 return false; 5503 5504 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 5505 5506 Use &LeftUse = Merge->getOperandUse(0); 5507 Use &RightUse = Merge->getOperandUse(1); 5508 5509 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 5510 LHS = LeftUse; 5511 RHS = RightUse; 5512 return true; 5513 } 5514 5515 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 5516 LHS = RightUse; 5517 RHS = LeftUse; 5518 return true; 5519 } 5520 5521 return false; 5522 } 5523 5524 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 5525 auto IsReachable = 5526 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 5527 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 5528 const Loop *L = LI.getLoopFor(PN->getParent()); 5529 5530 // We don't want to break LCSSA, even in a SCEV expression tree. 5531 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5532 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 5533 return nullptr; 5534 5535 // Try to match 5536 // 5537 // br %cond, label %left, label %right 5538 // left: 5539 // br label %merge 5540 // right: 5541 // br label %merge 5542 // merge: 5543 // V = phi [ %x, %left ], [ %y, %right ] 5544 // 5545 // as "select %cond, %x, %y" 5546 5547 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 5548 assert(IDom && "At least the entry block should dominate PN"); 5549 5550 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 5551 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 5552 5553 if (BI && BI->isConditional() && 5554 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 5555 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 5556 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 5557 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 5558 } 5559 5560 return nullptr; 5561 } 5562 5563 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 5564 if (const SCEV *S = createAddRecFromPHI(PN)) 5565 return S; 5566 5567 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 5568 return S; 5569 5570 // If the PHI has a single incoming value, follow that value, unless the 5571 // PHI's incoming blocks are in a different loop, in which case doing so 5572 // risks breaking LCSSA form. Instcombine would normally zap these, but 5573 // it doesn't have DominatorTree information, so it may miss cases. 5574 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5575 if (LI.replacementPreservesLCSSAForm(PN, V)) 5576 return getSCEV(V); 5577 5578 // If it's not a loop phi, we can't handle it yet. 5579 return getUnknown(PN); 5580 } 5581 5582 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 5583 Value *Cond, 5584 Value *TrueVal, 5585 Value *FalseVal) { 5586 // Handle "constant" branch or select. This can occur for instance when a 5587 // loop pass transforms an inner loop and moves on to process the outer loop. 5588 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 5589 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 5590 5591 // Try to match some simple smax or umax patterns. 
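  // e.g. (an illustrative sketch, not taken from a test case):
  //   %c = icmp sgt i32 %a, %b
  //   %v = select i1 %c, i32 %a, i32 %b   ==>  smax(%a, %b)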
5592 auto *ICI = dyn_cast<ICmpInst>(Cond); 5593 if (!ICI) 5594 return getUnknown(I); 5595 5596 Value *LHS = ICI->getOperand(0); 5597 Value *RHS = ICI->getOperand(1); 5598 5599 switch (ICI->getPredicate()) { 5600 case ICmpInst::ICMP_SLT: 5601 case ICmpInst::ICMP_SLE: 5602 case ICmpInst::ICMP_ULT: 5603 case ICmpInst::ICMP_ULE: 5604 std::swap(LHS, RHS); 5605 LLVM_FALLTHROUGH; 5606 case ICmpInst::ICMP_SGT: 5607 case ICmpInst::ICMP_SGE: 5608 case ICmpInst::ICMP_UGT: 5609 case ICmpInst::ICMP_UGE: 5610 // a > b ? a+x : b+x -> max(a, b)+x 5611 // a > b ? b+x : a+x -> min(a, b)+x 5612 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5613 bool Signed = ICI->isSigned(); 5614 const SCEV *LA = getSCEV(TrueVal); 5615 const SCEV *RA = getSCEV(FalseVal); 5616 const SCEV *LS = getSCEV(LHS); 5617 const SCEV *RS = getSCEV(RHS); 5618 if (LA->getType()->isPointerTy()) { 5619 // FIXME: Handle cases where LS/RS are pointers not equal to LA/RA. 5620 // Need to make sure we can't produce weird expressions involving 5621 // negated pointers. 5622 if (LA == LS && RA == RS) 5623 return Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS); 5624 if (LA == RS && RA == LS) 5625 return Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS); 5626 } 5627 auto CoerceOperand = [&](const SCEV *Op) -> const SCEV * { 5628 if (Op->getType()->isPointerTy()) { 5629 Op = getLosslessPtrToIntExpr(Op); 5630 if (isa<SCEVCouldNotCompute>(Op)) 5631 return Op; 5632 } 5633 if (Signed) 5634 Op = getNoopOrSignExtend(Op, I->getType()); 5635 else 5636 Op = getNoopOrZeroExtend(Op, I->getType()); 5637 return Op; 5638 }; 5639 LS = CoerceOperand(LS); 5640 RS = CoerceOperand(RS); 5641 if (isa<SCEVCouldNotCompute>(LS) || isa<SCEVCouldNotCompute>(RS)) 5642 break; 5643 const SCEV *LDiff = getMinusSCEV(LA, LS); 5644 const SCEV *RDiff = getMinusSCEV(RA, RS); 5645 if (LDiff == RDiff) 5646 return getAddExpr(Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS), 5647 LDiff); 5648 LDiff = getMinusSCEV(LA, RS); 5649 RDiff = getMinusSCEV(RA, LS); 5650 if (LDiff == RDiff) 5651 return getAddExpr(Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS), 5652 LDiff); 5653 } 5654 break; 5655 case ICmpInst::ICMP_NE: 5656 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 5657 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5658 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5659 const SCEV *One = getOne(I->getType()); 5660 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5661 const SCEV *LA = getSCEV(TrueVal); 5662 const SCEV *RA = getSCEV(FalseVal); 5663 const SCEV *LDiff = getMinusSCEV(LA, LS); 5664 const SCEV *RDiff = getMinusSCEV(RA, One); 5665 if (LDiff == RDiff) 5666 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5667 } 5668 break; 5669 case ICmpInst::ICMP_EQ: 5670 // n == 0 ? 1+x : n+x -> umax(n, 1)+x 5671 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5672 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5673 const SCEV *One = getOne(I->getType()); 5674 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5675 const SCEV *LA = getSCEV(TrueVal); 5676 const SCEV *RA = getSCEV(FalseVal); 5677 const SCEV *LDiff = getMinusSCEV(LA, One); 5678 const SCEV *RDiff = getMinusSCEV(RA, LS); 5679 if (LDiff == RDiff) 5680 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5681 } 5682 break; 5683 default: 5684 break; 5685 } 5686 5687 return getUnknown(I); 5688 } 5689 5690 /// Expand GEP instructions into add and multiply operations. 
This allows them 5691 /// to be analyzed by regular SCEV code. 5692 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 5693 // Don't attempt to analyze GEPs over unsized objects. 5694 if (!GEP->getSourceElementType()->isSized()) 5695 return getUnknown(GEP); 5696 5697 SmallVector<const SCEV *, 4> IndexExprs; 5698 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) 5699 IndexExprs.push_back(getSCEV(*Index)); 5700 return getGEPExpr(GEP, IndexExprs); 5701 } 5702 5703 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { 5704 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5705 return C->getAPInt().countTrailingZeros(); 5706 5707 if (const SCEVPtrToIntExpr *I = dyn_cast<SCEVPtrToIntExpr>(S)) 5708 return GetMinTrailingZeros(I->getOperand()); 5709 5710 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 5711 return std::min(GetMinTrailingZeros(T->getOperand()), 5712 (uint32_t)getTypeSizeInBits(T->getType())); 5713 5714 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 5715 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5716 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5717 ? getTypeSizeInBits(E->getType()) 5718 : OpRes; 5719 } 5720 5721 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 5722 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5723 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5724 ? getTypeSizeInBits(E->getType()) 5725 : OpRes; 5726 } 5727 5728 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 5729 // The result is the min of all operands results. 5730 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5731 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5732 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5733 return MinOpRes; 5734 } 5735 5736 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 5737 // The result is the sum of all operands results. 5738 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 5739 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 5740 for (unsigned i = 1, e = M->getNumOperands(); 5741 SumOpRes != BitWidth && i != e; ++i) 5742 SumOpRes = 5743 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); 5744 return SumOpRes; 5745 } 5746 5747 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 5748 // The result is the min of all operands results. 5749 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5750 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5751 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5752 return MinOpRes; 5753 } 5754 5755 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 5756 // The result is the min of all operands results. 5757 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5758 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5759 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5760 return MinOpRes; 5761 } 5762 5763 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 5764 // The result is the min of all operands results. 5765 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5766 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5767 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5768 return MinOpRes; 5769 } 5770 5771 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5772 // For a SCEVUnknown, ask ValueTracking. 
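    // (Illustrative: if the known bits show the low two bits are zero, the
    // unknown value is a multiple of 4, i.e. it has at least 2 trailing
    // zeros.)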
5773 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 5774 return Known.countMinTrailingZeros(); 5775 } 5776 5777 // SCEVUDivExpr 5778 return 0; 5779 } 5780 5781 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 5782 auto I = MinTrailingZerosCache.find(S); 5783 if (I != MinTrailingZerosCache.end()) 5784 return I->second; 5785 5786 uint32_t Result = GetMinTrailingZerosImpl(S); 5787 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 5788 assert(InsertPair.second && "Should insert a new key"); 5789 return InsertPair.first->second; 5790 } 5791 5792 /// Helper method to assign a range to V from metadata present in the IR. 5793 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 5794 if (Instruction *I = dyn_cast<Instruction>(V)) 5795 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 5796 return getConstantRangeFromMetadata(*MD); 5797 5798 return None; 5799 } 5800 5801 void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec, 5802 SCEV::NoWrapFlags Flags) { 5803 if (AddRec->getNoWrapFlags(Flags) != Flags) { 5804 AddRec->setNoWrapFlags(Flags); 5805 UnsignedRanges.erase(AddRec); 5806 SignedRanges.erase(AddRec); 5807 } 5808 } 5809 5810 ConstantRange ScalarEvolution:: 5811 getRangeForUnknownRecurrence(const SCEVUnknown *U) { 5812 const DataLayout &DL = getDataLayout(); 5813 5814 unsigned BitWidth = getTypeSizeInBits(U->getType()); 5815 const ConstantRange FullSet(BitWidth, /*isFullSet=*/true); 5816 5817 // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then 5818 // use information about the trip count to improve our available range. Note 5819 // that the trip count independent cases are already handled by known bits. 5820 // WARNING: The definition of recurrence used here is subtly different than 5821 // the one used by AddRec (and thus most of this file). Step is allowed to 5822 // be arbitrarily loop varying here, where AddRec allows only loop invariant 5823 // and other addrecs in the same loop (for non-affine addrecs). The code 5824 // below intentionally handles the case where step is not loop invariant. 5825 auto *P = dyn_cast<PHINode>(U->getValue()); 5826 if (!P) 5827 return FullSet; 5828 5829 // Make sure that no Phi input comes from an unreachable block. Otherwise, 5830 // even the values that are not available in these blocks may come from them, 5831 // and this leads to false-positive recurrence test. 5832 for (auto *Pred : predecessors(P->getParent())) 5833 if (!DT.isReachableFromEntry(Pred)) 5834 return FullSet; 5835 5836 BinaryOperator *BO; 5837 Value *Start, *Step; 5838 if (!matchSimpleRecurrence(P, BO, Start, Step)) 5839 return FullSet; 5840 5841 // If we found a recurrence in reachable code, we must be in a loop. Note 5842 // that BO might be in some subloop of L, and that's completely okay. 5843 auto *L = LI.getLoopFor(P->getParent()); 5844 assert(L && L->getHeader() == P->getParent()); 5845 if (!L->contains(BO->getParent())) 5846 // NOTE: This bailout should be an assert instead. However, asserting 5847 // the condition here exposes a case where LoopFusion is querying SCEV 5848 // with malformed loop information during the midst of the transform. 5849 // There doesn't appear to be an obvious fix, so for the moment bailout 5850 // until the caller issue can be fixed. PR49566 tracks the bug. 
5851 return FullSet; 5852 5853 // TODO: Extend to other opcodes such as mul, and div 5854 switch (BO->getOpcode()) { 5855 default: 5856 return FullSet; 5857 case Instruction::AShr: 5858 case Instruction::LShr: 5859 case Instruction::Shl: 5860 break; 5861 }; 5862 5863 if (BO->getOperand(0) != P) 5864 // TODO: Handle the power function forms some day. 5865 return FullSet; 5866 5867 unsigned TC = getSmallConstantMaxTripCount(L); 5868 if (!TC || TC >= BitWidth) 5869 return FullSet; 5870 5871 auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT); 5872 auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT); 5873 assert(KnownStart.getBitWidth() == BitWidth && 5874 KnownStep.getBitWidth() == BitWidth); 5875 5876 // Compute total shift amount, being careful of overflow and bitwidths. 5877 auto MaxShiftAmt = KnownStep.getMaxValue(); 5878 APInt TCAP(BitWidth, TC-1); 5879 bool Overflow = false; 5880 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow); 5881 if (Overflow) 5882 return FullSet; 5883 5884 switch (BO->getOpcode()) { 5885 default: 5886 llvm_unreachable("filtered out above"); 5887 case Instruction::AShr: { 5888 // For each ashr, three cases: 5889 // shift = 0 => unchanged value 5890 // saturation => 0 or -1 5891 // other => a value closer to zero (of the same sign) 5892 // Thus, the end value is closer to zero than the start. 5893 auto KnownEnd = KnownBits::ashr(KnownStart, 5894 KnownBits::makeConstant(TotalShift)); 5895 if (KnownStart.isNonNegative()) 5896 // Analogous to lshr (simply not yet canonicalized) 5897 return ConstantRange::getNonEmpty(KnownEnd.getMinValue(), 5898 KnownStart.getMaxValue() + 1); 5899 if (KnownStart.isNegative()) 5900 // End >=u Start && End <=s Start 5901 return ConstantRange::getNonEmpty(KnownStart.getMinValue(), 5902 KnownEnd.getMaxValue() + 1); 5903 break; 5904 } 5905 case Instruction::LShr: { 5906 // For each lshr, three cases: 5907 // shift = 0 => unchanged value 5908 // saturation => 0 5909 // other => a smaller positive number 5910 // Thus, the low end of the unsigned range is the last value produced. 5911 auto KnownEnd = KnownBits::lshr(KnownStart, 5912 KnownBits::makeConstant(TotalShift)); 5913 return ConstantRange::getNonEmpty(KnownEnd.getMinValue(), 5914 KnownStart.getMaxValue() + 1); 5915 } 5916 case Instruction::Shl: { 5917 // Iff no bits are shifted out, value increases on every shift. 5918 auto KnownEnd = KnownBits::shl(KnownStart, 5919 KnownBits::makeConstant(TotalShift)); 5920 if (TotalShift.ult(KnownStart.countMinLeadingZeros())) 5921 return ConstantRange(KnownStart.getMinValue(), 5922 KnownEnd.getMaxValue() + 1); 5923 break; 5924 } 5925 }; 5926 return FullSet; 5927 } 5928 5929 /// Determine the range for a particular SCEV. If SignHint is 5930 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 5931 /// with a "cleaner" unsigned (resp. signed) representation. 5932 const ConstantRange & 5933 ScalarEvolution::getRangeRef(const SCEV *S, 5934 ScalarEvolution::RangeSignHint SignHint) { 5935 DenseMap<const SCEV *, ConstantRange> &Cache = 5936 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 5937 : SignedRanges; 5938 ConstantRange::PreferredRangeType RangeType = 5939 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED 5940 ? ConstantRange::Unsigned : ConstantRange::Signed; 5941 5942 // See if we've computed this range already. 
5943 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 5944 if (I != Cache.end()) 5945 return I->second; 5946 5947 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5948 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 5949 5950 unsigned BitWidth = getTypeSizeInBits(S->getType()); 5951 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 5952 using OBO = OverflowingBinaryOperator; 5953 5954 // If the value has known zeros, the maximum value will have those known zeros 5955 // as well. 5956 uint32_t TZ = GetMinTrailingZeros(S); 5957 if (TZ != 0) { 5958 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 5959 ConservativeResult = 5960 ConstantRange(APInt::getMinValue(BitWidth), 5961 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 5962 else 5963 ConservativeResult = ConstantRange( 5964 APInt::getSignedMinValue(BitWidth), 5965 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 5966 } 5967 5968 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 5969 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 5970 unsigned WrapType = OBO::AnyWrap; 5971 if (Add->hasNoSignedWrap()) 5972 WrapType |= OBO::NoSignedWrap; 5973 if (Add->hasNoUnsignedWrap()) 5974 WrapType |= OBO::NoUnsignedWrap; 5975 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 5976 X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint), 5977 WrapType, RangeType); 5978 return setRange(Add, SignHint, 5979 ConservativeResult.intersectWith(X, RangeType)); 5980 } 5981 5982 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 5983 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 5984 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 5985 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 5986 return setRange(Mul, SignHint, 5987 ConservativeResult.intersectWith(X, RangeType)); 5988 } 5989 5990 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 5991 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 5992 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 5993 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 5994 return setRange(SMax, SignHint, 5995 ConservativeResult.intersectWith(X, RangeType)); 5996 } 5997 5998 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 5999 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 6000 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 6001 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 6002 return setRange(UMax, SignHint, 6003 ConservativeResult.intersectWith(X, RangeType)); 6004 } 6005 6006 if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) { 6007 ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint); 6008 for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i) 6009 X = X.smin(getRangeRef(SMin->getOperand(i), SignHint)); 6010 return setRange(SMin, SignHint, 6011 ConservativeResult.intersectWith(X, RangeType)); 6012 } 6013 6014 if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) { 6015 ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint); 6016 for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i) 6017 X = X.umin(getRangeRef(UMin->getOperand(i), SignHint)); 6018 return setRange(UMin, SignHint, 6019 ConservativeResult.intersectWith(X, RangeType)); 6020 } 6021 6022 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 6023 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 6024 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 6025 return setRange(UDiv, 
SignHint,
6026 ConservativeResult.intersectWith(X.udiv(Y), RangeType));
6027 }
6028 
6029 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
6030 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint);
6031 return setRange(ZExt, SignHint,
6032 ConservativeResult.intersectWith(X.zeroExtend(BitWidth),
6033 RangeType));
6034 }
6035 
6036 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
6037 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint);
6038 return setRange(SExt, SignHint,
6039 ConservativeResult.intersectWith(X.signExtend(BitWidth),
6040 RangeType));
6041 }
6042 
6043 if (const SCEVPtrToIntExpr *PtrToInt = dyn_cast<SCEVPtrToIntExpr>(S)) {
6044 ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint);
6045 return setRange(PtrToInt, SignHint, X);
6046 }
6047 
6048 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
6049 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
6050 return setRange(Trunc, SignHint,
6051 ConservativeResult.intersectWith(X.truncate(BitWidth),
6052 RangeType));
6053 }
6054 
6055 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
6056 // If there's no unsigned wrap, the value will never be less than its
6057 // initial value.
6058 if (AddRec->hasNoUnsignedWrap()) {
6059 APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
6060 if (!UnsignedMinValue.isNullValue())
6061 ConservativeResult = ConservativeResult.intersectWith(
6062 ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
6063 }
6064 
6065 // If there's no signed wrap, and all the operands except the initial value
6066 // have the same sign or are zero, the value won't ever be:
6067 // 1: smaller than the initial value if the operands are non-negative,
6068 // 2: bigger than the initial value if the operands are non-positive.
6069 // In both cases, the value cannot cross the signed min/max boundary.
6070 if (AddRec->hasNoSignedWrap()) {
6071 bool AllNonNeg = true;
6072 bool AllNonPos = true;
6073 for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) {
6074 if (!isKnownNonNegative(AddRec->getOperand(i)))
6075 AllNonNeg = false;
6076 if (!isKnownNonPositive(AddRec->getOperand(i)))
6077 AllNonPos = false;
6078 }
6079 if (AllNonNeg)
6080 ConservativeResult = ConservativeResult.intersectWith(
6081 ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()),
6082 APInt::getSignedMinValue(BitWidth)),
6083 RangeType);
6084 else if (AllNonPos)
6085 ConservativeResult = ConservativeResult.intersectWith(
6086 ConstantRange::getNonEmpty(
6087 APInt::getSignedMinValue(BitWidth),
6088 getSignedRangeMax(AddRec->getStart()) + 1),
6089 RangeType);
6090 }
6091 
6092 // TODO: non-affine addrec
6093 if (AddRec->isAffine()) {
6094 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(AddRec->getLoop());
6095 if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
6096 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
6097 auto RangeFromAffine = getRangeForAffineAR(
6098 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
6099 BitWidth);
6100 ConservativeResult =
6101 ConservativeResult.intersectWith(RangeFromAffine, RangeType);
6102 
6103 auto RangeFromFactoring = getRangeViaFactoring(
6104 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
6105 BitWidth);
6106 ConservativeResult =
6107 ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
6108 }
6109 
6110 // Now try symbolic BE count and more powerful methods.
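// (Editorial illustration, not from the original source: for an AddRec such
// as {0,+,1}<nsw><nw> in a loop whose max backedge-taken count is only known
// symbolically, getRangeForAffineNoSelfWrappingAR below may still bound the
// range by the union of the Start and End ranges.)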
6111 if (UseExpensiveRangeSharpening) {
6112 const SCEV *SymbolicMaxBECount =
6113 getSymbolicMaxBackedgeTakenCount(AddRec->getLoop());
6114 if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
6115 getTypeSizeInBits(SymbolicMaxBECount->getType()) <= BitWidth &&
6116 AddRec->hasNoSelfWrap()) {
6117 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
6118 AddRec, SymbolicMaxBECount, BitWidth, SignHint);
6119 ConservativeResult =
6120 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
6121 }
6122 }
6123 }
6124 
6125 return setRange(AddRec, SignHint, std::move(ConservativeResult));
6126 }
6127 
6128 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
6129 
6130 // Check if the IR explicitly contains !range metadata.
6131 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
6132 if (MDRange.hasValue())
6133 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
6134 RangeType);
6135 
6136 // Use facts about recurrences in the underlying IR. Note that add
6137 // recurrences are AddRecExprs and thus don't hit this path. This
6138 // primarily handles shift recurrences.
6139 auto CR = getRangeForUnknownRecurrence(U);
6140 ConservativeResult = ConservativeResult.intersectWith(CR);
6141 
6142 // See if ValueTracking can give us a useful range.
6143 const DataLayout &DL = getDataLayout();
6144 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
6145 if (Known.getBitWidth() != BitWidth)
6146 Known = Known.zextOrTrunc(BitWidth);
6147 
6148 // ValueTracking may be able to compute a tighter result for the number of
6149 // sign bits than for the value of those sign bits.
6150 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
6151 if (U->getType()->isPointerTy()) {
6152 // If the pointer size is larger than the index size type, this can cause
6153 // NS to be larger than BitWidth. So compensate for this.
6154 unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
6155 int ptrIdxDiff = ptrSize - BitWidth;
6156 if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
6157 NS -= ptrIdxDiff;
6158 }
6159 
6160 if (NS > 1) {
6161 // If we know any of the sign bits, we know all of the sign bits.
6162 if (!Known.Zero.getHiBits(NS).isNullValue())
6163 Known.Zero.setHighBits(NS);
6164 if (!Known.One.getHiBits(NS).isNullValue())
6165 Known.One.setHighBits(NS);
6166 }
6167 
6168 if (Known.getMinValue() != Known.getMaxValue() + 1)
6169 ConservativeResult = ConservativeResult.intersectWith(
6170 ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
6171 RangeType);
6172 if (NS > 1)
6173 ConservativeResult = ConservativeResult.intersectWith(
6174 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
6175 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
6176 RangeType);
6177 
6178 // The range of a Phi is a subset of the union of the ranges of its inputs.
6179 if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
6180 // Make sure that we do not loop forever on cyclic Phis.
6181 if (PendingPhiRanges.insert(Phi).second) {
6182 ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
6183 for (auto &Op : Phi->operands()) {
6184 auto OpRange = getRangeRef(getSCEV(Op), SignHint);
6185 RangeFromOps = RangeFromOps.unionWith(OpRange);
6186 // No point in continuing if we already have a full set.
6187 if (RangeFromOps.isFullSet())
6188 break;
6189 }
6190 ConservativeResult =
6191 ConservativeResult.intersectWith(RangeFromOps, RangeType);
6192 bool Erased = PendingPhiRanges.erase(Phi);
6193 assert(Erased && "Failed to erase Phi properly?");
6194 (void) Erased;
6195 }
6196 }
6197 
6198 return setRange(U, SignHint, std::move(ConservativeResult));
6199 }
6200 
6201 return setRange(S, SignHint, std::move(ConservativeResult));
6202 }
6203 
6204 // Given a StartRange, Step and MaxBECount for an expression, compute a range
6205 // of values that the expression can take. Initially, the expression has a
6206 // value from StartRange and then is changed by Step up to MaxBECount times.
6207 // The Signed argument defines whether we treat Step as signed or unsigned.
6208 static ConstantRange getRangeForAffineARHelper(APInt Step,
6209 const ConstantRange &StartRange,
6210 const APInt &MaxBECount,
6211 unsigned BitWidth, bool Signed) {
6212 // If either Step or MaxBECount is 0, then the expression won't change, and we
6213 // just need to return the initial range.
6214 if (Step == 0 || MaxBECount == 0)
6215 return StartRange;
6216 
6217 // If we don't know anything about the initial value (i.e. StartRange is
6218 // FullRange), then we don't know anything about the final range either.
6219 // Return FullRange.
6220 if (StartRange.isFullSet())
6221 return ConstantRange::getFull(BitWidth);
6222 
6223 // If Step is signed and negative, then we use its absolute value, but we also
6224 // note that we're moving in the opposite direction.
6225 bool Descending = Signed && Step.isNegative();
6226 
6227 if (Signed)
6228 // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
6229 // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
6230 // These equations hold due to the well-defined wrap-around behavior of
6231 // APInt.
6232 Step = Step.abs();
6233 
6234 // Check if Offset (i.e. Step * MaxBECount) exceeds the full span of
6235 // BitWidth. If it does, the expression is guaranteed to overflow.
6236 if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
6237 return ConstantRange::getFull(BitWidth);
6238 
6239 // Offset is by how much the expression can change. The check above
6240 // guarantees that this multiplication does not overflow.
6241 APInt Offset = Step * MaxBECount;
6242 
6243 // The minimum of the final range will match the minimum of StartRange if the
6244 // expression is increasing, and will be decreased by Offset otherwise.
6245 // The maximum of the final range will match the maximum of StartRange if the
6246 // expression is decreasing, and will be increased by Offset otherwise.
6247 APInt StartLower = StartRange.getLower();
6248 APInt StartUpper = StartRange.getUpper() - 1;
6249 APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
6250 : (StartUpper + std::move(Offset));
6251 
6252 // It's possible that the new minimum/maximum value will fall into the initial
6253 // range (due to wrap around). This means that the expression can take any
6254 // value in this bitwidth, and we have to return the full range.
6255 if (StartRange.contains(MovedBoundary))
6256 return ConstantRange::getFull(BitWidth);
6257 
6258 APInt NewLower =
6259 Descending ? std::move(MovedBoundary) : std::move(StartLower);
6260 APInt NewUpper =
6261 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
6262 NewUpper += 1;
6263 
6264 // No overflow detected; return the computed [NewLower, NewUpper) range.
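// Worked example (editorial illustration): with BitWidth = 8, Step = 2,
// MaxBECount = 5, StartRange = [10, 21) and Signed = false, Offset is
// 2 * 5 = 10 and MovedBoundary is 20 + 10 = 30, which does not fall back
// into [10, 21), so the returned range is [10, 31).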
6265 return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
6266 }
6267 
6268 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
6269 const SCEV *Step,
6270 const SCEV *MaxBECount,
6271 unsigned BitWidth) {
6272 assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
6273 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
6274 "Precondition!");
6275 
6276 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
6277 APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);
6278 
6279 // First, consider step signed.
6280 ConstantRange StartSRange = getSignedRange(Start);
6281 ConstantRange StepSRange = getSignedRange(Step);
6282 
6283 // If Step can be both positive and negative, we need to find ranges for the
6284 // maximum absolute step values in both directions and union them.
6285 ConstantRange SR =
6286 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
6287 MaxBECountValue, BitWidth, /* Signed = */ true);
6288 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
6289 StartSRange, MaxBECountValue,
6290 BitWidth, /* Signed = */ true));
6291 
6292 // Next, consider step unsigned.
6293 ConstantRange UR = getRangeForAffineARHelper(
6294 getUnsignedRangeMax(Step), getUnsignedRange(Start),
6295 MaxBECountValue, BitWidth, /* Signed = */ false);
6296 
6297 // Finally, intersect signed and unsigned ranges.
6298 return SR.intersectWith(UR, ConstantRange::Smallest);
6299 }
6300 
6301 ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
6302 const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
6303 ScalarEvolution::RangeSignHint SignHint) {
6304 assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!");
6305 assert(AddRec->hasNoSelfWrap() &&
6306 "This only works for non-self-wrapping AddRecs!");
6307 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
6308 const SCEV *Step = AddRec->getStepRecurrence(*this);
6309 // Only deal with constant step to save compile time.
6310 if (!isa<SCEVConstant>(Step))
6311 return ConstantRange::getFull(BitWidth);
6312 // Let's make sure that we can prove that we do not self-wrap during
6313 // MaxBECount iterations. We need this because MaxBECount is a maximum
6314 // iteration count estimate, and we might infer nw from some exit for which
6315 // we do not know the max exit count (or any other side reasoning).
6316 // TODO: Turn into assert at some point.
6317 if (getTypeSizeInBits(MaxBECount->getType()) >
6318 getTypeSizeInBits(AddRec->getType()))
6319 return ConstantRange::getFull(BitWidth);
6320 MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
6321 const SCEV *RangeWidth = getMinusOne(AddRec->getType());
6322 const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
6323 const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
6324 if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
6325 MaxItersWithoutWrap))
6326 return ConstantRange::getFull(BitWidth);
6327 
6328 ICmpInst::Predicate LEPred =
6329 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
6330 ICmpInst::Predicate GEPred =
6331 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
6332 const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
6333 
6334 // We know that there is no self-wrap. Let's take Start and End values and
6335 // look at all intermediate values V1, V2, ..., Vn that IndVar takes during
6336 // the iteration. They either lie inside the range [Min(Start, End),
6337 // Max(Start, End)] or outside it:
6338 //
6339 // Case 1: RangeMin ... Start V1 ... VN End ... RangeMax;
6340 // Case 2: RangeMin Vk ... V1 Start ... End Vn ... Vk + 1 RangeMax;
6341 //
6342 // The no-self-wrap flag guarantees that the intermediate values cannot be BOTH
6343 // outside and inside the range [Min(Start, End), Max(Start, End)]. Using that
6344 // knowledge, let's try to prove that we are dealing with Case 1. It holds if
6345 // Start <= End and the step is positive, or Start >= End and the step is negative.
6346 const SCEV *Start = AddRec->getStart();
6347 ConstantRange StartRange = getRangeRef(Start, SignHint);
6348 ConstantRange EndRange = getRangeRef(End, SignHint);
6349 ConstantRange RangeBetween = StartRange.unionWith(EndRange);
6350 // If they already cover the full iteration space, we will know nothing useful
6351 // even if we prove what we want to prove.
6352 if (RangeBetween.isFullSet())
6353 return RangeBetween;
6354 // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax).
6355 bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet()
6356 : RangeBetween.isWrappedSet();
6357 if (IsWrappedSet)
6358 return ConstantRange::getFull(BitWidth);
6359 
6360 if (isKnownPositive(Step) &&
6361 isKnownPredicateViaConstantRanges(LEPred, Start, End))
6362 return RangeBetween;
6363 else if (isKnownNegative(Step) &&
6364 isKnownPredicateViaConstantRanges(GEPred, Start, End))
6365 return RangeBetween;
6366 return ConstantRange::getFull(BitWidth);
6367 }
6368 
6369 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
6370 const SCEV *Step,
6371 const SCEV *MaxBECount,
6372 unsigned BitWidth) {
6373 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
6374 // == RangeOf({A,+,P}) union RangeOf({B,+,Q})
6375 
6376 struct SelectPattern {
6377 Value *Condition = nullptr;
6378 APInt TrueValue;
6379 APInt FalseValue;
6380 
6381 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
6382 const SCEV *S) {
6383 Optional<unsigned> CastOp;
6384 APInt Offset(BitWidth, 0);
6385 
6386 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth &&
6387 "Should be!");
6388 
6389 // Peel off a constant offset:
6390 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) {
6391 // In the future we could consider being smarter here and handle
6392 // {Start+Step,+,Step} too.
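// For example (editorial illustration): S = (4 + zext i8 (select %c, i8 1,
// i8 3) to i32) peels Offset = 4 here, then the zext below, and finally
// matches the select, giving TrueValue = 5 and FalseValue = 7 once the cast
// and offset are re-applied.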
6393 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 6394 return; 6395 6396 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 6397 S = SA->getOperand(1); 6398 } 6399 6400 // Peel off a cast operation 6401 if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) { 6402 CastOp = SCast->getSCEVType(); 6403 S = SCast->getOperand(); 6404 } 6405 6406 using namespace llvm::PatternMatch; 6407 6408 auto *SU = dyn_cast<SCEVUnknown>(S); 6409 const APInt *TrueVal, *FalseVal; 6410 if (!SU || 6411 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 6412 m_APInt(FalseVal)))) { 6413 Condition = nullptr; 6414 return; 6415 } 6416 6417 TrueValue = *TrueVal; 6418 FalseValue = *FalseVal; 6419 6420 // Re-apply the cast we peeled off earlier 6421 if (CastOp.hasValue()) 6422 switch (*CastOp) { 6423 default: 6424 llvm_unreachable("Unknown SCEV cast type!"); 6425 6426 case scTruncate: 6427 TrueValue = TrueValue.trunc(BitWidth); 6428 FalseValue = FalseValue.trunc(BitWidth); 6429 break; 6430 case scZeroExtend: 6431 TrueValue = TrueValue.zext(BitWidth); 6432 FalseValue = FalseValue.zext(BitWidth); 6433 break; 6434 case scSignExtend: 6435 TrueValue = TrueValue.sext(BitWidth); 6436 FalseValue = FalseValue.sext(BitWidth); 6437 break; 6438 } 6439 6440 // Re-apply the constant offset we peeled off earlier 6441 TrueValue += Offset; 6442 FalseValue += Offset; 6443 } 6444 6445 bool isRecognized() { return Condition != nullptr; } 6446 }; 6447 6448 SelectPattern StartPattern(*this, BitWidth, Start); 6449 if (!StartPattern.isRecognized()) 6450 return ConstantRange::getFull(BitWidth); 6451 6452 SelectPattern StepPattern(*this, BitWidth, Step); 6453 if (!StepPattern.isRecognized()) 6454 return ConstantRange::getFull(BitWidth); 6455 6456 if (StartPattern.Condition != StepPattern.Condition) { 6457 // We don't handle this case today; but we could, by considering four 6458 // possibilities below instead of two. I'm not sure if there are cases where 6459 // that will help over what getRange already does, though. 6460 return ConstantRange::getFull(BitWidth); 6461 } 6462 6463 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 6464 // construct arbitrary general SCEV expressions here. This function is called 6465 // from deep in the call stack, and calling getSCEV (on a sext instruction, 6466 // say) can end up caching a suboptimal value. 6467 6468 // FIXME: without the explicit `this` receiver below, MSVC errors out with 6469 // C2352 and C2512 (otherwise it isn't needed). 6470 6471 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 6472 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 6473 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 6474 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 6475 6476 ConstantRange TrueRange = 6477 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 6478 ConstantRange FalseRange = 6479 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 6480 6481 return TrueRange.unionWith(FalseRange); 6482 } 6483 6484 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 6485 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 6486 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 6487 6488 // Return early if there are no flags to propagate to the SCEV. 
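// For example (editorial illustration): a plain 'add i32 %a, %b' carrying
// neither nuw nor nsw produces FlagAnyWrap right here, skipping the more
// expensive isSCEVExprNeverPoison check below.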
6489 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6490 if (BinOp->hasNoUnsignedWrap()) 6491 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 6492 if (BinOp->hasNoSignedWrap()) 6493 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 6494 if (Flags == SCEV::FlagAnyWrap) 6495 return SCEV::FlagAnyWrap; 6496 6497 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 6498 } 6499 6500 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 6501 // Here we check that I is in the header of the innermost loop containing I, 6502 // since we only deal with instructions in the loop header. The actual loop we 6503 // need to check later will come from an add recurrence, but getting that 6504 // requires computing the SCEV of the operands, which can be expensive. This 6505 // check we can do cheaply to rule out some cases early. 6506 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 6507 if (InnermostContainingLoop == nullptr || 6508 InnermostContainingLoop->getHeader() != I->getParent()) 6509 return false; 6510 6511 // Only proceed if we can prove that I does not yield poison. 6512 if (!programUndefinedIfPoison(I)) 6513 return false; 6514 6515 // At this point we know that if I is executed, then it does not wrap 6516 // according to at least one of NSW or NUW. If I is not executed, then we do 6517 // not know if the calculation that I represents would wrap. Multiple 6518 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 6519 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 6520 // derived from other instructions that map to the same SCEV. We cannot make 6521 // that guarantee for cases where I is not executed. So we need to find the 6522 // loop that I is considered in relation to and prove that I is executed for 6523 // every iteration of that loop. That implies that the value that I 6524 // calculates does not wrap anywhere in the loop, so then we can apply the 6525 // flags to the SCEV. 6526 // 6527 // We check isLoopInvariant to disambiguate in case we are adding recurrences 6528 // from different loops, so that we know which loop to prove that I is 6529 // executed in. 6530 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) { 6531 // I could be an extractvalue from a call to an overflow intrinsic. 6532 // TODO: We can do better here in some cases. 6533 if (!isSCEVable(I->getOperand(OpIndex)->getType())) 6534 return false; 6535 const SCEV *Op = getSCEV(I->getOperand(OpIndex)); 6536 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { 6537 bool AllOtherOpsLoopInvariant = true; 6538 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands(); 6539 ++OtherOpIndex) { 6540 if (OtherOpIndex != OpIndex) { 6541 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex)); 6542 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) { 6543 AllOtherOpsLoopInvariant = false; 6544 break; 6545 } 6546 } 6547 } 6548 if (AllOtherOpsLoopInvariant && 6549 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop())) 6550 return true; 6551 } 6552 } 6553 return false; 6554 } 6555 6556 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) { 6557 // If we know that \c I can never be poison period, then that's enough. 
6558 if (isSCEVExprNeverPoison(I))
6559 return true;
6560 
6561 // For an add recurrence specifically, we assume that infinite loops without
6562 // side effects are undefined behavior, and then reason as follows:
6563 //
6564 // If the add recurrence is poison in any iteration, it is poison on all
6565 // future iterations (since incrementing poison yields poison). If the result
6566 // of the add recurrence is fed into the loop latch condition and the loop
6567 // does not contain any throws or exiting blocks other than the latch, we now
6568 // have the ability to "choose" whether the backedge is taken or not (by
6569 // choosing a sufficiently evil value for the poison feeding into the branch)
6570 // for every iteration including and after the one in which \p I first became
6571 // poison. There are two possibilities (let's call the iteration in which \p
6572 // I first became poison as K):
6573 //
6574 // 1. In the set of iterations including and after K, the loop body executes
6575 // no side effects. In this case executing the backedge an infinite number
6576 // of times will yield undefined behavior.
6577 //
6578 // 2. In the set of iterations including and after K, the loop body executes
6579 // at least one side effect. In this case, that specific instance of side
6580 // effect is control dependent on poison, which also yields undefined
6581 // behavior.
6582 
6583 auto *ExitingBB = L->getExitingBlock();
6584 auto *LatchBB = L->getLoopLatch();
6585 if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
6586 return false;
6587 
6588 SmallPtrSet<const Instruction *, 16> Pushed;
6589 SmallVector<const Instruction *, 8> PoisonStack;
6590 
6591 // We start by assuming \c I, the post-inc add recurrence, is poison. Only
6592 // things that are known to be poison under that assumption go on the
6593 // PoisonStack.
6594 Pushed.insert(I);
6595 PoisonStack.push_back(I);
6596 
6597 bool LatchControlDependentOnPoison = false;
6598 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
6599 const Instruction *Poison = PoisonStack.pop_back_val();
6600 
6601 for (auto *PoisonUser : Poison->users()) {
6602 if (propagatesPoison(cast<Operator>(PoisonUser))) {
6603 if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
6604 PoisonStack.push_back(cast<Instruction>(PoisonUser));
6605 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
6606 assert(BI->isConditional() && "Only possibility!");
6607 if (BI->getParent() == LatchBB) {
6608 LatchControlDependentOnPoison = true;
6609 break;
6610 }
6611 }
6612 }
6613 }
6614 
6615 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
6616 }
6617 
6618 ScalarEvolution::LoopProperties
6619 ScalarEvolution::getLoopProperties(const Loop *L) {
6620 using LoopProperties = ScalarEvolution::LoopProperties;
6621 
6622 auto Itr = LoopPropertiesCache.find(L);
6623 if (Itr == LoopPropertiesCache.end()) {
6624 auto HasSideEffects = [](Instruction *I) {
6625 if (auto *SI = dyn_cast<StoreInst>(I))
6626 return !SI->isSimple();
6627 
6628 return I->mayHaveSideEffects();
6629 };
6630 
6631 LoopProperties LP = {/* HasNoAbnormalExits */ true,
6632 /*HasNoSideEffects*/ true};
6633 
6634 for (auto *BB : L->getBlocks())
6635 for (auto &I : *BB) {
6636 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
6637 LP.HasNoAbnormalExits = false;
6638 if (HasSideEffects(&I))
6639 LP.HasNoSideEffects = false;
6640 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
6641 break; // We're already as pessimistic as we can get.
6642 } 6643 6644 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 6645 assert(InsertPair.second && "We just checked!"); 6646 Itr = InsertPair.first; 6647 } 6648 6649 return Itr->second; 6650 } 6651 6652 bool ScalarEvolution::loopIsFiniteByAssumption(const Loop *L) { 6653 // A mustprogress loop without side effects must be finite. 6654 // TODO: The check used here is very conservative. It's only *specific* 6655 // side effects which are well defined in infinite loops. 6656 return isMustProgress(L) && loopHasNoSideEffects(L); 6657 } 6658 6659 const SCEV *ScalarEvolution::createSCEV(Value *V) { 6660 if (!isSCEVable(V->getType())) 6661 return getUnknown(V); 6662 6663 if (Instruction *I = dyn_cast<Instruction>(V)) { 6664 // Don't attempt to analyze instructions in blocks that aren't 6665 // reachable. Such instructions don't matter, and they aren't required 6666 // to obey basic rules for definitions dominating uses which this 6667 // analysis depends on. 6668 if (!DT.isReachableFromEntry(I->getParent())) 6669 return getUnknown(UndefValue::get(V->getType())); 6670 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 6671 return getConstant(CI); 6672 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 6673 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 6674 else if (!isa<ConstantExpr>(V)) 6675 return getUnknown(V); 6676 6677 Operator *U = cast<Operator>(V); 6678 if (auto BO = MatchBinaryOp(U, DT)) { 6679 switch (BO->Opcode) { 6680 case Instruction::Add: { 6681 // The simple thing to do would be to just call getSCEV on both operands 6682 // and call getAddExpr with the result. However if we're looking at a 6683 // bunch of things all added together, this can be quite inefficient, 6684 // because it leads to N-1 getAddExpr calls for N ultimate operands. 6685 // Instead, gather up all the operands and make a single getAddExpr call. 6686 // LLVM IR canonical form means we need only traverse the left operands. 6687 SmallVector<const SCEV *, 4> AddOps; 6688 do { 6689 if (BO->Op) { 6690 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6691 AddOps.push_back(OpSCEV); 6692 break; 6693 } 6694 6695 // If a NUW or NSW flag can be applied to the SCEV for this 6696 // addition, then compute the SCEV for this addition by itself 6697 // with a separate call to getAddExpr. We need to do that 6698 // instead of pushing the operands of the addition onto AddOps, 6699 // since the flags are only known to apply to this particular 6700 // addition - they may not apply to other additions that can be 6701 // formed with operands from AddOps. 
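// For instance (editorial illustration): in ((a +nsw b) + c) + d only the
// innermost addition carries nsw, so it is folded by its own getAddExpr
// call; flattening all four operands into one getAddExpr would wrongly
// extend nsw to the flag-less outer additions.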
6702 const SCEV *RHS = getSCEV(BO->RHS); 6703 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6704 if (Flags != SCEV::FlagAnyWrap) { 6705 const SCEV *LHS = getSCEV(BO->LHS); 6706 if (BO->Opcode == Instruction::Sub) 6707 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 6708 else 6709 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 6710 break; 6711 } 6712 } 6713 6714 if (BO->Opcode == Instruction::Sub) 6715 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 6716 else 6717 AddOps.push_back(getSCEV(BO->RHS)); 6718 6719 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6720 if (!NewBO || (NewBO->Opcode != Instruction::Add && 6721 NewBO->Opcode != Instruction::Sub)) { 6722 AddOps.push_back(getSCEV(BO->LHS)); 6723 break; 6724 } 6725 BO = NewBO; 6726 } while (true); 6727 6728 return getAddExpr(AddOps); 6729 } 6730 6731 case Instruction::Mul: { 6732 SmallVector<const SCEV *, 4> MulOps; 6733 do { 6734 if (BO->Op) { 6735 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6736 MulOps.push_back(OpSCEV); 6737 break; 6738 } 6739 6740 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6741 if (Flags != SCEV::FlagAnyWrap) { 6742 MulOps.push_back( 6743 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 6744 break; 6745 } 6746 } 6747 6748 MulOps.push_back(getSCEV(BO->RHS)); 6749 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6750 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 6751 MulOps.push_back(getSCEV(BO->LHS)); 6752 break; 6753 } 6754 BO = NewBO; 6755 } while (true); 6756 6757 return getMulExpr(MulOps); 6758 } 6759 case Instruction::UDiv: 6760 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6761 case Instruction::URem: 6762 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6763 case Instruction::Sub: { 6764 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6765 if (BO->Op) 6766 Flags = getNoWrapFlagsFromUB(BO->Op); 6767 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 6768 } 6769 case Instruction::And: 6770 // For an expression like x&255 that merely masks off the high bits, 6771 // use zext(trunc(x)) as the SCEV expression. 6772 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6773 if (CI->isZero()) 6774 return getSCEV(BO->RHS); 6775 if (CI->isMinusOne()) 6776 return getSCEV(BO->LHS); 6777 const APInt &A = CI->getValue(); 6778 6779 // Instcombine's ShrinkDemandedConstant may strip bits out of 6780 // constants, obscuring what would otherwise be a low-bits mask. 6781 // Use computeKnownBits to compute what ShrinkDemandedConstant 6782 // knew about to reconstruct a low-bits mask value. 6783 unsigned LZ = A.countLeadingZeros(); 6784 unsigned TZ = A.countTrailingZeros(); 6785 unsigned BitWidth = A.getBitWidth(); 6786 KnownBits Known(BitWidth); 6787 computeKnownBits(BO->LHS, Known, getDataLayout(), 6788 0, &AC, nullptr, &DT); 6789 6790 APInt EffectiveMask = 6791 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 6792 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 6793 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 6794 const SCEV *LHS = getSCEV(BO->LHS); 6795 const SCEV *ShiftedLHS = nullptr; 6796 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 6797 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 6798 // For an expression like (x * 8) & 8, simplify the multiply. 
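// (Editorial illustration: for (x * 24) & 0xF8 on i8, TZ = 3 and
// MulZeros = 3, so GCD = 3 and DivAmt = 1, and the multiply is rewritten
// as x * 3 before the final zext(trunc(...)) * 8 is built.)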
6799 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 6800 unsigned GCD = std::min(MulZeros, TZ); 6801 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 6802 SmallVector<const SCEV*, 4> MulOps; 6803 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 6804 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 6805 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 6806 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 6807 } 6808 } 6809 if (!ShiftedLHS) 6810 ShiftedLHS = getUDivExpr(LHS, MulCount); 6811 return getMulExpr( 6812 getZeroExtendExpr( 6813 getTruncateExpr(ShiftedLHS, 6814 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 6815 BO->LHS->getType()), 6816 MulCount); 6817 } 6818 } 6819 break; 6820 6821 case Instruction::Or: 6822 // If the RHS of the Or is a constant, we may have something like: 6823 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 6824 // optimizations will transparently handle this case. 6825 // 6826 // In order for this transformation to be safe, the LHS must be of the 6827 // form X*(2^n) and the Or constant must be less than 2^n. 6828 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6829 const SCEV *LHS = getSCEV(BO->LHS); 6830 const APInt &CIVal = CI->getValue(); 6831 if (GetMinTrailingZeros(LHS) >= 6832 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 6833 // Build a plain add SCEV. 6834 return getAddExpr(LHS, getSCEV(CI), 6835 (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW)); 6836 } 6837 } 6838 break; 6839 6840 case Instruction::Xor: 6841 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6842 // If the RHS of xor is -1, then this is a not operation. 6843 if (CI->isMinusOne()) 6844 return getNotSCEV(getSCEV(BO->LHS)); 6845 6846 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 6847 // This is a variant of the check for xor with -1, and it handles 6848 // the case where instcombine has trimmed non-demanded bits out 6849 // of an xor with -1. 6850 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 6851 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 6852 if (LBO->getOpcode() == Instruction::And && 6853 LCI->getValue() == CI->getValue()) 6854 if (const SCEVZeroExtendExpr *Z = 6855 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 6856 Type *UTy = BO->LHS->getType(); 6857 const SCEV *Z0 = Z->getOperand(); 6858 Type *Z0Ty = Z0->getType(); 6859 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 6860 6861 // If C is a low-bits mask, the zero extend is serving to 6862 // mask off the high bits. Complement the operand and 6863 // re-apply the zext. 6864 if (CI->getValue().isMask(Z0TySize)) 6865 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 6866 6867 // If C is a single bit, it may be in the sign-bit position 6868 // before the zero-extend. In this case, represent the xor 6869 // using an add, which is equivalent, and re-apply the zext. 6870 APInt Trunc = CI->getValue().trunc(Z0TySize); 6871 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 6872 Trunc.isSignMask()) 6873 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 6874 UTy); 6875 } 6876 } 6877 break; 6878 6879 case Instruction::Shl: 6880 // Turn shift left of a constant amount into a multiply. 6881 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 6882 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 6883 6884 // If the shift count is not less than the bitwidth, the result of 6885 // the shift is undefined. 
Don't try to analyze it, because the 6886 // resolution chosen here may differ from the resolution chosen in 6887 // other parts of the compiler. 6888 if (SA->getValue().uge(BitWidth)) 6889 break; 6890 6891 // We can safely preserve the nuw flag in all cases. It's also safe to 6892 // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation 6893 // requires special handling. It can be preserved as long as we're not 6894 // left shifting by bitwidth - 1. 6895 auto Flags = SCEV::FlagAnyWrap; 6896 if (BO->Op) { 6897 auto MulFlags = getNoWrapFlagsFromUB(BO->Op); 6898 if ((MulFlags & SCEV::FlagNSW) && 6899 ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1))) 6900 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW); 6901 if (MulFlags & SCEV::FlagNUW) 6902 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW); 6903 } 6904 6905 Constant *X = ConstantInt::get( 6906 getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 6907 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags); 6908 } 6909 break; 6910 6911 case Instruction::AShr: { 6912 // AShr X, C, where C is a constant. 6913 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS); 6914 if (!CI) 6915 break; 6916 6917 Type *OuterTy = BO->LHS->getType(); 6918 uint64_t BitWidth = getTypeSizeInBits(OuterTy); 6919 // If the shift count is not less than the bitwidth, the result of 6920 // the shift is undefined. Don't try to analyze it, because the 6921 // resolution chosen here may differ from the resolution chosen in 6922 // other parts of the compiler. 6923 if (CI->getValue().uge(BitWidth)) 6924 break; 6925 6926 if (CI->isZero()) 6927 return getSCEV(BO->LHS); // shift by zero --> noop 6928 6929 uint64_t AShrAmt = CI->getZExtValue(); 6930 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt); 6931 6932 Operator *L = dyn_cast<Operator>(BO->LHS); 6933 if (L && L->getOpcode() == Instruction::Shl) { 6934 // X = Shl A, n 6935 // Y = AShr X, m 6936 // Both n and m are constant. 6937 6938 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0)); 6939 if (L->getOperand(1) == BO->RHS) 6940 // For a two-shift sext-inreg, i.e. n = m, 6941 // use sext(trunc(x)) as the SCEV expression. 6942 return getSignExtendExpr( 6943 getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy); 6944 6945 ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1)); 6946 if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) { 6947 uint64_t ShlAmt = ShlAmtCI->getZExtValue(); 6948 if (ShlAmt > AShrAmt) { 6949 // When n > m, use sext(mul(trunc(x), 2^(n-m)))) as the SCEV 6950 // expression. We already checked that ShlAmt < BitWidth, so 6951 // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as 6952 // ShlAmt - AShrAmt < Amt. 6953 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt, 6954 ShlAmt - AShrAmt); 6955 return getSignExtendExpr( 6956 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy), 6957 getConstant(Mul)), OuterTy); 6958 } 6959 } 6960 } 6961 break; 6962 } 6963 } 6964 } 6965 6966 switch (U->getOpcode()) { 6967 case Instruction::Trunc: 6968 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType()); 6969 6970 case Instruction::ZExt: 6971 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 6972 6973 case Instruction::SExt: 6974 if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) { 6975 // The NSW flag of a subtract does not always survive the conversion to 6976 // A + (-1)*B. By pushing sign extension onto its operands we are much 6977 // more likely to preserve NSW and allow later AddRec optimisations. 
6978 //
6979 // NOTE: This is effectively duplicating this logic from getSignExtend:
6980 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
6981 // but by that point the NSW information has potentially been lost.
6982 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
6983 Type *Ty = U->getType();
6984 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
6985 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
6986 return getMinusSCEV(V1, V2, SCEV::FlagNSW);
6987 }
6988 }
6989 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
6990 
6991 case Instruction::BitCast:
6992 // BitCasts are no-op casts so we just eliminate the cast.
6993 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
6994 return getSCEV(U->getOperand(0));
6995 break;
6996 
6997 case Instruction::PtrToInt: {
6998 // Pointer to integer cast is straightforward, so do model it.
6999 const SCEV *Op = getSCEV(U->getOperand(0));
7000 Type *DstIntTy = U->getType();
7001 // But only if the effective SCEV (integer) type is wide enough to represent
7002 // all possible pointer values.
7003 const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy);
7004 if (isa<SCEVCouldNotCompute>(IntOp))
7005 return getUnknown(V);
7006 return IntOp;
7007 }
7008 case Instruction::IntToPtr:
7009 // Just don't deal with inttoptr casts.
7010 return getUnknown(V);
7011 
7012 case Instruction::SDiv:
7013 // If both operands are non-negative, this is just a udiv.
7014 if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
7015 isKnownNonNegative(getSCEV(U->getOperand(1))))
7016 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
7017 break;
7018 
7019 case Instruction::SRem:
7020 // If both operands are non-negative, this is just a urem.
7021 if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
7022 isKnownNonNegative(getSCEV(U->getOperand(1))))
7023 return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
7024 break;
7025 
7026 case Instruction::GetElementPtr:
7027 return createNodeForGEP(cast<GEPOperator>(U));
7028 
7029 case Instruction::PHI:
7030 return createNodeForPHI(cast<PHINode>(U));
7031 
7032 case Instruction::Select:
7033 // U can also be a select constant expr, which we let fall through. Since
7034 // createNodeForSelectOrPHI only works for a condition that is an `ICmpInst`,
7035 // and constant expressions cannot have instructions as operands, we'd have
7036 // returned getUnknown for a select constant expression anyway.
7037 if (isa<Instruction>(U))
7038 return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
7039 U->getOperand(1), U->getOperand(2));
7040 break;
7041 
7042 case Instruction::Call:
7043 case Instruction::Invoke:
7044 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
7045 return getSCEV(RV);
7046 
7047 if (auto *II = dyn_cast<IntrinsicInst>(U)) {
7048 switch (II->getIntrinsicID()) {
7049 case Intrinsic::abs:
7050 return getAbsExpr(
7051 getSCEV(II->getArgOperand(0)),
7052 /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne());
7053 case Intrinsic::umax:
7054 return getUMaxExpr(getSCEV(II->getArgOperand(0)),
7055 getSCEV(II->getArgOperand(1)));
7056 case Intrinsic::umin:
7057 return getUMinExpr(getSCEV(II->getArgOperand(0)),
7058 getSCEV(II->getArgOperand(1)));
7059 case Intrinsic::smax:
7060 return getSMaxExpr(getSCEV(II->getArgOperand(0)),
7061 getSCEV(II->getArgOperand(1)));
7062 case Intrinsic::smin:
7063 return getSMinExpr(getSCEV(II->getArgOperand(0)),
7064 getSCEV(II->getArgOperand(1)));
7065 case Intrinsic::usub_sat: {
7066 const SCEV *X = getSCEV(II->getArgOperand(0));
7067 const SCEV *Y = getSCEV(II->getArgOperand(1));
7068 const SCEV *ClampedY = getUMinExpr(X, Y);
7069 return getMinusSCEV(X, ClampedY, SCEV::FlagNUW);
7070 }
7071 case Intrinsic::uadd_sat: {
7072 const SCEV *X = getSCEV(II->getArgOperand(0));
7073 const SCEV *Y = getSCEV(II->getArgOperand(1));
7074 const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y));
7075 return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
7076 }
7077 case Intrinsic::start_loop_iterations:
7078 // A start_loop_iterations is just equivalent to the first operand for
7079 // SCEV purposes.
7080 return getSCEV(II->getArgOperand(0));
7081 default:
7082 break;
7083 }
7084 }
7085 break;
7086 }
7087 
7088 return getUnknown(V);
7089 }
7090 
7091 //===----------------------------------------------------------------------===//
7092 // Iteration Count Computation Code
7093 //
7094 
7095 const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount) {
7096 // Get the trip count from the BE count by adding 1. Overflow results in
7097 // zero, which means "unknown".
7098 return getAddExpr(ExitCount, getOne(ExitCount->getType()));
7099 }
7100 
7101 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
7102 if (!ExitCount)
7103 return 0;
7104 
7105 ConstantInt *ExitConst = ExitCount->getValue();
7106 
7107 // Guard against huge trip counts.
7108 if (ExitConst->getValue().getActiveBits() > 32)
7109 return 0;
7110 
7111 // In case of integer overflow, this returns 0, which is correct.
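// For example (editorial illustration): an exit count of 7 yields a trip
// count of 8, while an exit count of 2^32 - 1 wraps the 32-bit addition
// around to 0, i.e. "unknown".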
7112 return ((unsigned)ExitConst->getZExtValue()) + 1;
7113 }
7114 
7115 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
7116 auto *ExitCount = dyn_cast<SCEVConstant>(getBackedgeTakenCount(L, Exact));
7117 return getConstantTripCount(ExitCount);
7118 }
7119 
7120 unsigned
7121 ScalarEvolution::getSmallConstantTripCount(const Loop *L,
7122 const BasicBlock *ExitingBlock) {
7123 assert(ExitingBlock && "Must pass a non-null exiting block!");
7124 assert(L->isLoopExiting(ExitingBlock) &&
7125 "Exiting block must actually branch out of the loop!");
7126 const SCEVConstant *ExitCount =
7127 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
7128 return getConstantTripCount(ExitCount);
7129 }
7130 
7131 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
7132 const auto *MaxExitCount =
7133 dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
7134 return getConstantTripCount(MaxExitCount);
7135 }
7136 
7137 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
7138 SmallVector<BasicBlock *, 8> ExitingBlocks;
7139 L->getExitingBlocks(ExitingBlocks);
7140 
7141 Optional<unsigned> Res = None;
7142 for (auto *ExitingBB : ExitingBlocks) {
7143 unsigned Multiple = getSmallConstantTripMultiple(L, ExitingBB);
7144 if (!Res)
7145 Res = Multiple;
7146 Res = (unsigned)GreatestCommonDivisor64(*Res, Multiple);
7147 }
7148 return Res.getValueOr(1);
7149 }
7150 
7151 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
7152 const SCEV *ExitCount) {
7153 if (ExitCount == getCouldNotCompute())
7154 return 1;
7155 
7156 // Get the trip count.
7157 const SCEV *TCExpr = getTripCountFromExitCount(ExitCount);
7158 
7159 const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
7160 if (!TC)
7161 // Attempt to factor more general cases. Returns the greatest power-of-two
7162 // divisor. If overflow happens, the trip count expression is still
7163 // divisible by the greatest power-of-two divisor returned.
7164 return 1U << std::min((uint32_t)31,
7165 GetMinTrailingZeros(applyLoopGuards(TCExpr, L)));
7166 
7167 ConstantInt *Result = TC->getValue();
7168 
7169 // Guard against huge trip counts (this requires checking
7170 // for zero to handle the case where the trip count == -1 and the
7171 // addition wraps).
7172 if (!Result || Result->getValue().getActiveBits() > 32 ||
7173 Result->getValue().getActiveBits() == 0)
7174 return 1;
7175 
7176 return (unsigned)Result->getZExtValue();
7177 }
7178 
7179 /// Returns the largest constant divisor of the trip count of this loop as a
7180 /// normal unsigned value, if possible. This means that the actual trip count is
7181 /// always a multiple of the returned value (don't forget the trip count could
7182 /// very well be zero as well!).
7183 ///
7184 /// Returns 1 if the trip count is unknown or not guaranteed to be a
7185 /// multiple of a constant (which is also the case if the trip count is simply
7186 /// constant; use getSmallConstantTripCount for that case). Will also return 1
7187 /// if the trip count is very large (>= 2^32).
7188 ///
7189 /// As explained in the comments for getSmallConstantTripCount, this assumes
7190 /// that control exits the loop via ExitingBlock.
7191 unsigned
7192 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
7193 const BasicBlock *ExitingBlock) {
7194 assert(ExitingBlock && "Must pass a non-null exiting block!");
7195 assert(L->isLoopExiting(ExitingBlock) &&
7196 "Exiting block must actually branch out of the loop!");
7197 const SCEV *ExitCount = getExitCount(L, ExitingBlock);
7198 return getSmallConstantTripMultiple(L, ExitCount);
7199 }
7200 
7201 const SCEV *ScalarEvolution::getExitCount(const Loop *L,
7202 const BasicBlock *ExitingBlock,
7203 ExitCountKind Kind) {
7204 switch (Kind) {
7205 case Exact:
7206 case SymbolicMaximum:
7207 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
7208 case ConstantMaximum:
7209 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this);
7210 }
7211 llvm_unreachable("Invalid ExitCountKind!");
7212 }
7213 
7214 const SCEV *
7215 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
7216 SCEVUnionPredicate &Preds) {
7217 return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
7218 }
7219 
7220 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
7221 ExitCountKind Kind) {
7222 switch (Kind) {
7223 case Exact:
7224 return getBackedgeTakenInfo(L).getExact(L, this);
7225 case ConstantMaximum:
7226 return getBackedgeTakenInfo(L).getConstantMax(this);
7227 case SymbolicMaximum:
7228 return getBackedgeTakenInfo(L).getSymbolicMax(L, this);
7229 }
7230 llvm_unreachable("Invalid ExitCountKind!");
7231 }
7232 
7233 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
7234 return getBackedgeTakenInfo(L).isConstantMaxOrZero(this);
7235 }
7236 
7237 /// Push PHI nodes in the header of the given loop onto the given Worklist.
7238 static void
7239 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
7240 BasicBlock *Header = L->getHeader();
7241 
7242 // Push all Loop-header PHIs onto the Worklist stack.
7243 for (PHINode &PN : Header->phis())
7244 Worklist.push_back(&PN);
7245 }
7246 
7247 const ScalarEvolution::BackedgeTakenInfo &
7248 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
7249 auto &BTI = getBackedgeTakenInfo(L);
7250 if (BTI.hasFullInfo())
7251 return BTI;
7252 
7253 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
7254 
7255 if (!Pair.second)
7256 return Pair.first->second;
7257 
7258 BackedgeTakenInfo Result =
7259 computeBackedgeTakenCount(L, /*AllowPredicates=*/true);
7260 
7261 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
7262 }
7263 
7264 ScalarEvolution::BackedgeTakenInfo &
7265 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
7266 // Initially insert an invalid entry for this loop. If the insertion
7267 // succeeds, proceed to actually compute a backedge-taken count and
7268 // update the value. The temporary CouldNotCompute value tells SCEV
7269 // code elsewhere that it shouldn't attempt to request a new
7270 // backedge-taken count, which could result in infinite recursion.
7271 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
7272 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
7273 if (!Pair.second)
7274 return Pair.first->second;
7275 
7276 // computeBackedgeTakenCount may allocate memory for its result. Inserting it
7277 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
7278 // must be cleared in this scope.
7279 BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
7280 
7281 // In product builds the statistics are unused; reference them to avoid warnings.
7282 (void)NumTripCountsComputed;
7283 (void)NumTripCountsNotComputed;
7284 #if LLVM_ENABLE_STATS || !defined(NDEBUG)
7285 const SCEV *BEExact = Result.getExact(L, this);
7286 if (BEExact != getCouldNotCompute()) {
7287 assert(isLoopInvariant(BEExact, L) &&
7288 isLoopInvariant(Result.getConstantMax(this), L) &&
7289 "Computed backedge-taken count isn't loop invariant for loop!");
7290 ++NumTripCountsComputed;
7291 } else if (Result.getConstantMax(this) == getCouldNotCompute() &&
7292 isa<PHINode>(L->getHeader()->begin())) {
7293 // Only count loops that have phi nodes as not being computable.
7294 ++NumTripCountsNotComputed;
7295 }
7296 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG)
7297 
7298 // Now that we know more about the trip count for this loop, forget any
7299 // existing SCEV values for PHI nodes in this loop since they are only
7300 // conservative estimates made without the benefit of trip count
7301 // information. This is similar to the code in forgetLoop, except that
7302 // it handles SCEVUnknown PHI nodes specially.
7303 if (Result.hasAnyInfo()) {
7304 SmallVector<Instruction *, 16> Worklist;
7305 PushLoopPHIs(L, Worklist);
7306 
7307 SmallPtrSet<Instruction *, 8> Discovered;
7308 while (!Worklist.empty()) {
7309 Instruction *I = Worklist.pop_back_val();
7310 
7311 ValueExprMapType::iterator It =
7312 ValueExprMap.find_as(static_cast<Value *>(I));
7313 if (It != ValueExprMap.end()) {
7314 const SCEV *Old = It->second;
7315 
7316 // SCEVUnknown for a PHI either means that it has an unrecognized
7317 // structure, or it's a PHI that's in the process of being computed
7318 // by createNodeForPHI. In the former case, additional loop trip
7319 // count information isn't going to change anything. In the latter
7320 // case, createNodeForPHI will perform the necessary updates on its
7321 // own when it gets to that point.
7322 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
7323 eraseValueFromMap(It->first);
7324 forgetMemoizedResults(Old);
7325 }
7326 if (PHINode *PN = dyn_cast<PHINode>(I))
7327 ConstantEvolutionLoopExitValue.erase(PN);
7328 }
7329 
7330 // Since we don't need to invalidate anything for correctness and we're
7331 // only invalidating to make SCEV's results more precise, we get to stop
7332 // early to avoid invalidating too much. This is especially important in
7333 // cases like:
7334 //
7335 // %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
7336 // loop0:
7337 // %pn0 = phi
7338 // ...
7339 // loop1:
7340 // %pn1 = phi
7341 // ...
7342 //
7343 // where both loop0's and loop1's backedge-taken counts use the SCEV
7344 // expression for %v. If we don't have the early stop below then in cases
7345 // like the above, getBackedgeTakenInfo(loop1) will clear out the trip
7346 // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip
7347 // count for loop1, effectively nullifying SCEV's trip count cache.
7348 for (auto *U : I->users())
7349 if (auto *I = dyn_cast<Instruction>(U)) {
7350 auto *LoopForUser = LI.getLoopFor(I->getParent());
7351 if (LoopForUser && L->contains(LoopForUser) &&
7352 Discovered.insert(I).second)
7353 Worklist.push_back(I);
7354 }
7355 }
7356 }
7357 
7358 // Re-lookup the insert position, since the call to
7359 // computeBackedgeTakenCount above could result in a
7360 // recursive call to getBackedgeTakenInfo (on a different
7361 // loop), which would invalidate the iterator computed
7362 // earlier.
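// (E.g. the recursive call may insert into BackedgeTakenCounts and force the
// underlying DenseMap to grow and rehash, moving every entry.)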
7363 return BackedgeTakenCounts.find(L)->second = std::move(Result); 7364 } 7365 7366 void ScalarEvolution::forgetAllLoops() { 7367 // This method is intended to forget all info about loops. It should 7368 // invalidate caches as if the following happened: 7369 // - The trip counts of all loops have changed arbitrarily 7370 // - Every llvm::Value has been updated in place to produce a different 7371 // result. 7372 BackedgeTakenCounts.clear(); 7373 PredicatedBackedgeTakenCounts.clear(); 7374 LoopPropertiesCache.clear(); 7375 ConstantEvolutionLoopExitValue.clear(); 7376 ValueExprMap.clear(); 7377 ValuesAtScopes.clear(); 7378 LoopDispositions.clear(); 7379 BlockDispositions.clear(); 7380 UnsignedRanges.clear(); 7381 SignedRanges.clear(); 7382 ExprValueMap.clear(); 7383 HasRecMap.clear(); 7384 MinTrailingZerosCache.clear(); 7385 PredicatedSCEVRewrites.clear(); 7386 } 7387 7388 void ScalarEvolution::forgetLoop(const Loop *L) { 7389 SmallVector<const Loop *, 16> LoopWorklist(1, L); 7390 SmallVector<Instruction *, 32> Worklist; 7391 SmallPtrSet<Instruction *, 16> Visited; 7392 7393 // Iterate over all the loops and sub-loops to drop SCEV information. 7394 while (!LoopWorklist.empty()) { 7395 auto *CurrL = LoopWorklist.pop_back_val(); 7396 7397 // Drop any stored trip count value. 7398 BackedgeTakenCounts.erase(CurrL); 7399 PredicatedBackedgeTakenCounts.erase(CurrL); 7400 7401 // Drop information about predicated SCEV rewrites for this loop. 7402 for (auto I = PredicatedSCEVRewrites.begin(); 7403 I != PredicatedSCEVRewrites.end();) { 7404 std::pair<const SCEV *, const Loop *> Entry = I->first; 7405 if (Entry.second == CurrL) 7406 PredicatedSCEVRewrites.erase(I++); 7407 else 7408 ++I; 7409 } 7410 7411 auto LoopUsersItr = LoopUsers.find(CurrL); 7412 if (LoopUsersItr != LoopUsers.end()) { 7413 for (auto *S : LoopUsersItr->second) 7414 forgetMemoizedResults(S); 7415 LoopUsers.erase(LoopUsersItr); 7416 } 7417 7418 // Drop information about expressions based on loop-header PHIs. 7419 PushLoopPHIs(CurrL, Worklist); 7420 7421 while (!Worklist.empty()) { 7422 Instruction *I = Worklist.pop_back_val(); 7423 if (!Visited.insert(I).second) 7424 continue; 7425 7426 ValueExprMapType::iterator It = 7427 ValueExprMap.find_as(static_cast<Value *>(I)); 7428 if (It != ValueExprMap.end()) { 7429 eraseValueFromMap(It->first); 7430 forgetMemoizedResults(It->second); 7431 if (PHINode *PN = dyn_cast<PHINode>(I)) 7432 ConstantEvolutionLoopExitValue.erase(PN); 7433 } 7434 7435 PushDefUseChildren(I, Worklist); 7436 } 7437 7438 LoopPropertiesCache.erase(CurrL); 7439 // Forget all contained loops too, to avoid dangling entries in the 7440 // ValuesAtScopes map. 7441 LoopWorklist.append(CurrL->begin(), CurrL->end()); 7442 } 7443 } 7444 7445 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 7446 while (Loop *Parent = L->getParentLoop()) 7447 L = Parent; 7448 forgetLoop(L); 7449 } 7450 7451 void ScalarEvolution::forgetValue(Value *V) { 7452 Instruction *I = dyn_cast<Instruction>(V); 7453 if (!I) return; 7454 7455 // Drop information about expressions based on loop-header PHIs. 
  SmallVector<Instruction *, 16> Worklist;
  Worklist.push_back(I);

  SmallPtrSet<Instruction *, 8> Visited;
  while (!Worklist.empty()) {
    I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    ValueExprMapType::iterator It =
        ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      eraseValueFromMap(It->first);
      forgetMemoizedResults(It->second);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist);
  }
}

void ScalarEvolution::forgetLoopDispositions(const Loop *L) {
  LoopDispositions.clear();
}

/// Get the exact loop backedge taken count considering all loop exits. A
/// computable result can only be returned for loops with all exiting blocks
/// dominating the latch. howFarToZero assumes that the limit of each loop
/// test is never skipped. This is a valid assumption as long as the loop
/// exits via that test. For precise results, it is the caller's
/// responsibility to specify the relevant loop exiting block using
/// getExact(ExitingBlock, SE).
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
                                             SCEVUnionPredicate *Preds) const {
  // If any exits were not computable, the loop is not computable.
  if (!isComplete() || ExitNotTaken.empty())
    return SE->getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  // All exiting blocks we have collected must dominate the only backedge.
  if (!Latch)
    return SE->getCouldNotCompute();

  // All exiting blocks we have gathered dominate the loop's latch, so the
  // exact trip count is simply the minimum of all the calculated exit counts.
  SmallVector<const SCEV *, 2> Ops;
  for (auto &ENT : ExitNotTaken) {
    const SCEV *BECount = ENT.ExactNotTaken;
    assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
    assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
           "We should only have known counts for exiting blocks that dominate "
           "latch!");

    Ops.push_back(BECount);

    if (Preds && !ENT.hasAlwaysTruePredicate())
      Preds->add(ENT.Predicate.get());

    assert((Preds || ENT.hasAlwaysTruePredicate()) &&
           "Predicate should be always true!");
  }

  return SE->getUMinFromMismatchedTypes(Ops);
}

/// Get the exact not taken count for this loop exit.
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock,
                                             ScalarEvolution *SE) const {
  for (auto &ENT : ExitNotTaken)
    if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
      return ENT.ExactNotTaken;

  return SE->getCouldNotCompute();
}

const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
    const BasicBlock *ExitingBlock, ScalarEvolution *SE) const {
  for (auto &ENT : ExitNotTaken)
    if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
      return ENT.MaxNotTaken;

  return SE->getCouldNotCompute();
}

/// getConstantMax - Get the constant max backedge taken count for the loop.
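/// For example, in a loop that either exits after exactly 10 iterations or
/// may exit earlier through a data-dependent test, the constant max can
/// still be the constant 10 even though the exact count is not computable.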
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const {
  auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
    return !ENT.hasAlwaysTruePredicate();
  };

  if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getConstantMax())
    return SE->getCouldNotCompute();

  assert((isa<SCEVCouldNotCompute>(getConstantMax()) ||
          isa<SCEVConstant>(getConstantMax())) &&
         "No point in having a non-constant max backedge taken count!");
  return getConstantMax();
}

const SCEV *
ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L,
                                                   ScalarEvolution *SE) {
  if (!SymbolicMax)
    SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L);
  return SymbolicMax;
}

bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
    ScalarEvolution *SE) const {
  auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
    return !ENT.hasAlwaysTruePredicate();
  };
  return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue);
}

bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S) const {
  return Operands.contains(S);
}

ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
    : ExitLimit(E, E, false, None) {
}

ScalarEvolution::ExitLimit::ExitLimit(
    const SCEV *E, const SCEV *M, bool MaxOrZero,
    ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList)
    : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) {
  assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
          !isa<SCEVCouldNotCompute>(MaxNotTaken)) &&
         "Exact is not allowed to be less precise than Max");
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
  for (auto *PredSet : PredSetList)
    for (auto *P : *PredSet)
      addPredicate(P);
  assert((isa<SCEVCouldNotCompute>(E) || !E->getType()->isPointerTy()) &&
         "Backedge count should be int");
  assert((isa<SCEVCouldNotCompute>(M) || !M->getType()->isPointerTy()) &&
         "Max backedge count should be int");
}

ScalarEvolution::ExitLimit::ExitLimit(
    const SCEV *E, const SCEV *M, bool MaxOrZero,
    const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
    : ExitLimit(E, M, MaxOrZero, {&PredSet}) {
}

ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M,
                                      bool MaxOrZero)
    : ExitLimit(E, M, MaxOrZero, None) {
}

class SCEVRecordOperands {
  SmallPtrSetImpl<const SCEV *> &Operands;

public:
  SCEVRecordOperands(SmallPtrSetImpl<const SCEV *> &Operands)
      : Operands(Operands) {}
  bool follow(const SCEV *S) {
    Operands.insert(S);
    return true;
  }
  bool isDone() { return false; }
};

/// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
/// computable exit into a persistent ExitNotTakenInfo array.
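/// In addition, every SCEV subexpression reachable from the stored counts is
/// recorded in the Operands set, so that later hasOperand() queries can be
/// answered without re-walking the expressions.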
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
    ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
    bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero)
    : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  ExitNotTaken.reserve(ExitCounts.size());
  std::transform(
      ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
      [&](const EdgeExitInfo &EEI) {
        BasicBlock *ExitBB = EEI.first;
        const ExitLimit &EL = EEI.second;
        if (EL.Predicates.empty())
          return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
                                  nullptr);

        std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
        for (auto *Pred : EL.Predicates)
          Predicate->add(Pred);

        return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
                                std::move(Predicate));
      });
  assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
          isa<SCEVConstant>(ConstantMax)) &&
         "No point in having a non-constant max backedge taken count!");

  SCEVRecordOperands RecordOperands(Operands);
  SCEVTraversal<SCEVRecordOperands> ST(RecordOperands);
  if (!isa<SCEVCouldNotCompute>(ConstantMax))
    ST.visitAll(ConstantMax);
  for (auto &ENT : ExitNotTaken)
    if (!isa<SCEVCouldNotCompute>(ENT.ExactNotTaken))
      ST.visitAll(ENT.ExactNotTaken);
}

/// Compute the number of times the backedge of the specified loop will
/// execute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
                                           bool AllowPredicates) {
  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  SmallVector<EdgeExitInfo, 4> ExitCounts;
  bool CouldComputeBECount = true;
  BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
  const SCEV *MustExitMaxBECount = nullptr;
  const SCEV *MayExitMaxBECount = nullptr;
  bool MustExitMaxOrZero = false;

  // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
  // and compute maxBECount.
  // Do a union of all the predicates here.
  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitBB = ExitingBlocks[i];

    // We canonicalize untaken exits to br (constant); ignore them so that
    // proving an exit untaken doesn't negatively impact our ability to reason
    // about the loop as a whole.
    if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
      if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
        bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
        if ((ExitIfTrue && CI->isZero()) || (!ExitIfTrue && CI->isOne()))
          continue;
      }

    ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);

    assert((AllowPredicates || EL.Predicates.empty()) &&
           "Predicated exit limit when predicates are not allowed!");

    // 1. For each exit that can be computed, add an entry to ExitCounts.
    // CouldComputeBECount is true only if all exits can be computed.
    if (EL.ExactNotTaken == getCouldNotCompute())
      // We couldn't compute an exact value for this exit, so
      // we won't be able to compute an exact value for the loop.
      CouldComputeBECount = false;
    else
      ExitCounts.emplace_back(ExitBB, EL);

    // 2. Derive the loop's MaxBECount from each exit's max number of
    // non-exiting iterations. Partition the loop exits into two kinds:
    // LoopMustExits and LoopMayExits.
    //
    // If the exit dominates the loop latch, it is a LoopMustExit; otherwise
    // it is a LoopMayExit. If any computable LoopMustExit is found, then
    // MaxBECount is the minimum EL.MaxNotTaken of computable
    // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
    // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
    // computable EL.MaxNotTaken.
    if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
        DT.dominates(ExitBB, Latch)) {
      if (!MustExitMaxBECount) {
        MustExitMaxBECount = EL.MaxNotTaken;
        MustExitMaxOrZero = EL.MaxOrZero;
      } else {
        MustExitMaxBECount =
            getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
      }
    } else if (MayExitMaxBECount != getCouldNotCompute()) {
      if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
        MayExitMaxBECount = EL.MaxNotTaken;
      else {
        MayExitMaxBECount =
            getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
      }
    }
  }
  const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
    (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
  // The loop backedge will be taken the maximum or zero times if there's
  // a single exit that must be taken the maximum or zero times.
  bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
  return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
                           MaxBECount, MaxOrZero);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
                                  bool AllowPredicates) {
  assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
  // If our exiting block does not dominate the latch, then its connection
  // with the loop's exit limit may be far from trivial.
  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || !DT.dominates(ExitingBlock, Latch))
    return getCouldNotCompute();

  bool IsOnlyExit = (L->getExitingBlock() != nullptr);
  Instruction *Term = ExitingBlock->getTerminator();
  if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
    assert(BI->isConditional() && "If unconditional, it can't be in loop!");
    bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
    assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
           "It should have one successor in loop and one exit block!");
    // Proceed to the next level to examine the exit condition expression.
    return computeExitLimitFromCond(
        L, BI->getCondition(), ExitIfTrue,
        /*ControlsExit=*/IsOnlyExit, AllowPredicates);
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
    // For switch, make sure that there is a single exit from the loop.
    BasicBlock *Exit = nullptr;
    for (auto *SBB : successors(ExitingBlock))
      if (!L->contains(SBB)) {
        if (Exit) // Multiple exit successors.
          return getCouldNotCompute();
        Exit = SBB;
      }
    assert(Exit && "Exiting block must have at least one exit");
    return computeExitLimitFromSingleExitSwitch(L, SI, Exit,
                                                /*ControlsExit=*/IsOnlyExit);
  }

  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond(
    const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {
  ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
  return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
                                        ControlsExit, AllowPredicates);
}

Optional<ScalarEvolution::ExitLimit>
ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond,
                                      bool ExitIfTrue, bool ControlsExit,
                                      bool AllowPredicates) {
  (void)this->L;
  (void)this->ExitIfTrue;
  (void)this->AllowPredicates;

  assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
         this->AllowPredicates == AllowPredicates &&
         "Variance in assumed invariant key components!");
  auto Itr = TripCountMap.find({ExitCond, ControlsExit});
  if (Itr == TripCountMap.end())
    return None;
  return Itr->second;
}

void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond,
                                             bool ExitIfTrue,
                                             bool ControlsExit,
                                             bool AllowPredicates,
                                             const ExitLimit &EL) {
  assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
         this->AllowPredicates == AllowPredicates &&
         "Variance in assumed invariant key components!");

  auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL});
  assert(InsertResult.second && "Expected successful insertion!");
  (void)InsertResult;
  (void)ExitIfTrue;
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {

  if (auto MaybeEL =
          Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
    return *MaybeEL;

  ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue,
                                              ControlsExit, AllowPredicates);
  Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL);
  return EL;
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {
  // Handle BinOp conditions (And, Or).
  if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
          Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
    return *LimitFromBinOp;

  // With an icmp, it may be feasible to compute an exact backedge-taken count.
  // Proceed to the next level to examine the icmp.
  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
    ExitLimit EL =
        computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit);
    if (EL.hasFullInfo() || !AllowPredicates)
      return EL;

    // Try again, but use SCEV predicates this time.
    return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit,
                                    /*AllowPredicates=*/true);
  }

  // Check for a constant condition. These are normally stripped out by
  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
  // preserve the CFG and is temporarily leaving constant conditions
  // in place.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
    if (ExitIfTrue == !CI->getZExtValue())
      // The backedge is always taken.
      return getCouldNotCompute();
    else
      // The backedge is never taken.
      return getZero(CI->getType());
  }

  // If it's not an integer or pointer comparison then compute it the hard way.
  return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
}

Optional<ScalarEvolution::ExitLimit>
ScalarEvolution::computeExitLimitFromCondFromBinOp(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {
  // Check if the controlling expression for this loop is an And or Or.
  Value *Op0, *Op1;
  bool IsAnd = false;
  if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1))))
    IsAnd = true;
  else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
    IsAnd = false;
  else
    return None;

  // EitherMayExit is true in these two cases:
  //   br (and Op0 Op1), loop, exit
  //   br (or Op0 Op1), exit, loop
  bool EitherMayExit = IsAnd ^ ExitIfTrue;
  ExitLimit EL0 = computeExitLimitFromCondCached(Cache, L, Op0, ExitIfTrue,
                                                 ControlsExit && !EitherMayExit,
                                                 AllowPredicates);
  ExitLimit EL1 = computeExitLimitFromCondCached(Cache, L, Op1, ExitIfTrue,
                                                 ControlsExit && !EitherMayExit,
                                                 AllowPredicates);

  // Be robust against unsimplified IR for the form "op i1 X, NeutralElement".
  const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd);
  if (isa<ConstantInt>(Op1))
    return Op1 == NeutralElement ? EL0 : EL1;
  if (isa<ConstantInt>(Op0))
    return Op0 == NeutralElement ? EL1 : EL0;

  const SCEV *BECount = getCouldNotCompute();
  const SCEV *MaxBECount = getCouldNotCompute();
  if (EitherMayExit) {
    // Both conditions must be the same for the loop to continue executing.
    // Choose the less conservative count.
    // If ExitCond is a short-circuit form (select), using
    // umin(EL0.ExactNotTaken, EL1.ExactNotTaken) is unsafe in general.
    // To see the detailed examples, please see
    // test/Analysis/ScalarEvolution/exit-count-select.ll
    bool PoisonSafe = isa<BinaryOperator>(ExitCond);
    if (!PoisonSafe)
      // Even if ExitCond is a select, we can safely derive BECount using both
      // EL0 and EL1 in these cases:
      // (1) EL0.ExactNotTaken is non-zero
      // (2) EL1.ExactNotTaken is non-poison
      // (3) EL0.ExactNotTaken is zero (BECount should be simply zero and
      //     it cannot be umin(0, ..))
      // The PoisonSafe assignment below is simplified and the assertion after
      // BECount calculation fully guarantees the condition (3).
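      // (A SCEVConstant exit count cannot itself be poison, so requiring a
      // constant count on at least one side below is a conservative stand-in
      // for the cases (1)-(3) above.)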
      PoisonSafe = isa<SCEVConstant>(EL0.ExactNotTaken) ||
                   isa<SCEVConstant>(EL1.ExactNotTaken);
    if (EL0.ExactNotTaken != getCouldNotCompute() &&
        EL1.ExactNotTaken != getCouldNotCompute() && PoisonSafe) {
      BECount =
          getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);

      // If EL0.ExactNotTaken was zero and ExitCond was a short-circuit form,
      // it should have been simplified to zero (see the condition (3) above).
      assert(!isa<BinaryOperator>(ExitCond) || !EL0.ExactNotTaken->isZero() ||
             BECount->isZero());
    }
    if (EL0.MaxNotTaken == getCouldNotCompute())
      MaxBECount = EL1.MaxNotTaken;
    else if (EL1.MaxNotTaken == getCouldNotCompute())
      MaxBECount = EL0.MaxNotTaken;
    else
      MaxBECount = getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
  } else {
    // Both conditions must have the exit-taking value at the same time for
    // the loop to exit. For now, be conservative.
    if (EL0.ExactNotTaken == EL1.ExactNotTaken)
      BECount = EL0.ExactNotTaken;
  }

  // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
  // to be more aggressive when computing BECount than when computing
  // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and
  // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken
  // to not.
  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, false,
                   { &EL0.Predicates, &EL1.Predicates });
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
                                          ICmpInst *ExitCond,
                                          bool ExitIfTrue,
                                          bool ControlsExit,
                                          bool AllowPredicates) {
  // If the condition was exit on true, convert the condition to exit on false.
  ICmpInst::Predicate Pred;
  if (!ExitIfTrue)
    Pred = ExitCond->getPredicate();
  else
    Pred = ExitCond->getInversePredicate();
  const ICmpInst::Predicate OriginalPred = Pred;

  // Handle common loops like: for (X = "string"; *X; ++X)
  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
      ExitLimit ItCnt =
          computeLoadConstantCompareExitLimit(LI, RHS, L, Pred);
      if (ItCnt.hasAnyInfo())
        return ItCnt;
    }

  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));

  // Try to evaluate any dependencies out of the loop.
  LHS = getSCEVAtScope(LHS, L);
  RHS = getSCEVAtScope(RHS, L);

  // At this point, we would like to compute how many iterations of the
  // loop the predicate will return true for these inputs.
  if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
    // If there is a loop-invariant operand, force it into the RHS.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Simplify the operands before analyzing them.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  // If we have a comparison of a chrec against a constant, try to use value
  // ranges to answer this query.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
      if (AddRec->getLoop() == L) {
        // Form the constant range.
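        // e.g. if the loop continues while "%iv ult 100", the region is
        // [0, 100), and the exit count is the number of iterations for which
        // the AddRec stays inside it.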
        ConstantRange CompRange =
            ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt());

        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
      }

  switch (Pred) {
  case ICmpInst::ICMP_NE: {                     // while (X != Y)
    // Convert to: while (X-Y != 0)
    if (LHS->getType()->isPointerTy()) {
      LHS = getLosslessPtrToIntExpr(LHS);
      if (isa<SCEVCouldNotCompute>(LHS))
        return LHS;
    }
    if (RHS->getType()->isPointerTy()) {
      RHS = getLosslessPtrToIntExpr(RHS);
      if (isa<SCEVCouldNotCompute>(RHS))
        return RHS;
    }
    ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit,
                                AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
    // Convert to: while (X-Y == 0)
    if (LHS->getType()->isPointerTy()) {
      LHS = getLosslessPtrToIntExpr(LHS);
      if (isa<SCEVCouldNotCompute>(LHS))
        return LHS;
    }
    if (RHS->getType()->isPointerTy()) {
      RHS = getLosslessPtrToIntExpr(RHS);
      if (isa<SCEVCouldNotCompute>(RHS))
        return RHS;
    }
    ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_ULT: {                    // while (X < Y)
    bool IsSigned = Pred == ICmpInst::ICMP_SLT;
    ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit,
                                    AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_UGT: {                    // while (X > Y)
    bool IsSigned = Pred == ICmpInst::ICMP_SGT;
    ExitLimit EL =
        howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit,
                            AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  default:
    break;
  }

  auto *ExhaustiveCount =
      computeExitCountExhaustively(L, ExitCond, ExitIfTrue);

  if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
    return ExhaustiveCount;

  return computeShiftCompareExitLimit(ExitCond->getOperand(0),
                                      ExitCond->getOperand(1), L, OriginalPred);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
                                                      SwitchInst *Switch,
                                                      BasicBlock *ExitingBlock,
                                                      bool ControlsExit) {
  assert(!L->contains(ExitingBlock) && "Not an exiting block!");

  // Give up if the exit is the default dest of a switch.
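  // The default destination is reached only when the condition matches none
  // of the case values, so the exit condition is not the single equality
  // test that the howFarToZero call below can analyze.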
  if (Switch->getDefaultDest() == ExitingBlock)
    return getCouldNotCompute();

  assert(L->contains(Switch->getDefaultDest()) &&
         "Default case must not exit the loop!");
  const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
  const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));

  // while (X != Y) --> while (X-Y != 0)
  ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
  if (EL.hasAnyInfo())
    return EL;

  return getCouldNotCompute();
}

static ConstantInt *
EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
                                ScalarEvolution &SE) {
  const SCEV *InVal = SE.getConstant(C);
  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
  assert(isa<SCEVConstant>(Val) &&
         "Evaluation of SCEV at constant didn't fold correctly?");
  return cast<SCEVConstant>(Val)->getValue();
}

/// Given an exit condition of 'icmp op load X, cst', try to see if we can
/// compute the backedge execution count.
ScalarEvolution::ExitLimit
ScalarEvolution::computeLoadConstantCompareExitLimit(
    LoadInst *LI,
    Constant *RHS,
    const Loop *L,
    ICmpInst::Predicate predicate) {
  if (LI->isVolatile()) return getCouldNotCompute();

  // Check to see if the loaded pointer is a getelementptr of a global.
  // TODO: Use SCEV instead of manually grubbing with GEPs.
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return getCouldNotCompute();

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first IDX is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return getCouldNotCompute();

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = nullptr;
  std::vector<Constant*> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i-2;
      Indexes.push_back(nullptr);
    }

  // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
  if (!VarIdx)
    return getCouldNotCompute();

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Check to see if X is a loop variant variable value now.
  const SCEV *Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}<L>.
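  // For illustration, a loop like "for (i = 0; table[i]; ++i)" over a
  // constant global array gives the affine index {0,+,1}<L>; the code below
  // then folds table[0], table[1], ... until the compare decides the exit.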
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || IdxExpr->getLoop() != L || !IdxExpr->isAffine() ||
      isLoopInvariant(IdxExpr, L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return getCouldNotCompute();

  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst = ConstantInt::get(
        cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
                                                         Indexes);
    if (!Result) break;  // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
      ++NumArrayLenItCounts;
      return getConstant(ItCst);   // Found terminating iteration!
    }
  }
  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
  if (!RHS)
    return getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return getCouldNotCompute();

  const BasicBlock *Predecessor = L->getLoopPredecessor();
  if (!Predecessor)
    return getCouldNotCompute();

  // Return true if V is of the form "LHS `shift_op` <positive constant>".
  // Return LHS in OutLHS and shift_op in OutOpCode.
  auto MatchPositiveShift =
      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {

    using namespace PatternMatch;

    ConstantInt *ShiftAmt;
    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::LShr;
    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::AShr;
    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::Shl;
    else
      return false;

    return ShiftAmt->getValue().isStrictlyPositive();
  };

  // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in
  //
  // loop:
  //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
  //   %iv.shifted = lshr i32 %iv, <positive constant>
  //
  // Return true on a successful match.  Return the corresponding PHI node
  // (%iv above) in PNOut and the opcode of the shift operation in OpCodeOut.
  auto MatchShiftRecurrence =
      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
    Optional<Instruction::BinaryOps> PostShiftOpCode;

    {
      Instruction::BinaryOps OpC;
      Value *V;

      // If we encounter a shift instruction, "peel off" the shift operation,
      // and remember that we did so. Later when we inspect %iv's backedge
      // value, we will make sure that the backedge value uses the same
      // operation.
      //
      // Note: the peeled shift operation does not have to be the same
      // instruction as the one feeding into the PHI's backedge value. We only
      // really care about it being the same *kind* of shift instruction --
      // that's all that is required for our later inferences to hold.
      if (MatchPositiveShift(LHS, V, OpC)) {
        PostShiftOpCode = OpC;
        LHS = V;
      }
    }

    PNOut = dyn_cast<PHINode>(LHS);
    if (!PNOut || PNOut->getParent() != L->getHeader())
      return false;

    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
    Value *OpLHS;

    return
        // The backedge value for the PHI node must be a shift by a positive
        // amount
        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&

        // of the PHI node itself
        OpLHS == PNOut &&

        // and the kind of shift should match the kind of shift we peeled
        // off, if any.
        (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
  };

  PHINode *PN;
  Instruction::BinaryOps OpCode;
  if (!MatchShiftRecurrence(LHS, PN, OpCode))
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();

  // The key rationale for this optimization is that for some kinds of shift
  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
  // within a finite number of iterations. If the condition guarding the
  // backedge (in the sense that the backedge is taken if the condition is
  // true) is false for the value the shift recurrence stabilizes to, then we
  // know that the backedge is taken only a finite number of times.

  ConstantInt *StableValue = nullptr;
  switch (OpCode) {
  default:
    llvm_unreachable("Impossible case!");

  case Instruction::AShr: {
    // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
    // bitwidth(K) iterations.
    Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
    KnownBits Known = computeKnownBits(FirstValue, DL, 0, &AC,
                                       Predecessor->getTerminator(), &DT);
    auto *Ty = cast<IntegerType>(RHS->getType());
    if (Known.isNonNegative())
      StableValue = ConstantInt::get(Ty, 0);
    else if (Known.isNegative())
      StableValue = ConstantInt::get(Ty, -1, true);
    else
      return getCouldNotCompute();

    break;
  }
  case Instruction::LShr:
  case Instruction::Shl:
    // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
    // stabilize to 0 in at most bitwidth(K) iterations.
    StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
    break;
  }

  auto *Result =
      ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
  assert(Result->getType()->isIntegerTy(1) &&
         "Otherwise cannot be an operand to a branch instruction");

  if (Result->isZeroValue()) {
    unsigned BitWidth = getTypeSizeInBits(RHS->getType());
    const SCEV *UpperBound =
        getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
    return ExitLimit(getCouldNotCompute(), UpperBound, false);
  }

  return getCouldNotCompute();
}

/// Return true if we can constant fold an instruction of the specified type,
/// assuming that all operands were constants.
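/// e.g. binary operators, compares, selects, casts, GEPs, loads and
/// extractvalues qualify, as do calls whose callee is a known constant
/// foldable library function.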
static bool CanConstantFold(const Instruction *I) {
  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
      isa<LoadInst>(I) || isa<ExtractValueInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction())
      return canConstantFoldCallTo(CI, F);
  return false;
}

/// Determine whether this instruction can constant evolve within this loop
/// assuming its operands can all constant evolve.
static bool canConstantEvolve(Instruction *I, const Loop *L) {
  // An instruction outside of the loop can't be derived from a loop PHI.
  if (!L->contains(I)) return false;

  if (isa<PHINode>(I)) {
    // We don't currently keep track of the control flow needed to evaluate
    // PHIs, so we cannot handle PHIs inside of loops.
    return L->getHeader() == I->getParent();
  }

  // If we won't be able to constant fold this expression even if the operands
  // are constants, bail early.
  return CanConstantFold(I);
}

/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
/// recursing through each instruction operand until reaching a loop header
/// phi.
static PHINode *
getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
                               DenseMap<Instruction *, PHINode *> &PHIMap,
                               unsigned Depth) {
  if (Depth > MaxConstantEvolvingDepth)
    return nullptr;

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
  PHINode *PHI = nullptr;
  for (Value *Op : UseInst->operands()) {
    if (isa<Constant>(Op)) continue;

    Instruction *OpInst = dyn_cast<Instruction>(Op);
    if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;

    PHINode *P = dyn_cast<PHINode>(OpInst);
    if (!P)
      // If this operand is already visited, reuse the prior result.
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr;  // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr;  // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from.  We allow arbitrary operations along
/// the way, but the operands of an operation must either be constants or
/// values derived from a constant PHI.  If this expression does not fit with
/// these constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
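  // The map memoizes, for each instruction already visited, the unique
  // header PHI it evolves from (or null if there is none).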
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}

/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI
/// nodes in the loop have the constant values given in the Vals map.  If we
/// can't fold this expression for some reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that we
  // weren't given a mapping for, or a value such as a call inside the loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
  if (isa<PHINode>(I)) return nullptr;

  std::vector<Constant*> Operands(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
    if (!Operand) {
      Operands[i] = dyn_cast<Constant>(I->getOperand(i));
      if (!Operands[i]) return nullptr;
      continue;
    }
    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
    Vals[Operand] = C;
    if (!C) return nullptr;
    Operands[i] = C;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                           Operands[1], DL, TLI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isVolatile())
      return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
  }
  return ConstantFoldInstOperands(I, Operands, DL, TLI);
}


// If every incoming value to PN except the one for BB is a specific Constant,
// return that, else return nullptr.
static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
  Constant *IncomingVal = nullptr;

  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingBlock(i) == BB)
      continue;

    auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
    if (!CurrentVal)
      return nullptr;

    if (IncomingVal != CurrentVal) {
      if (IncomingVal)
        return nullptr;
      IncomingVal = CurrentVal;
    }
  }

  return IncomingVal;
}

/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// in the header of its containing loop, we know the loop executes a
/// constant number of times, and the PHI node is just a recurrence
/// involving constants, fold it.
Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
                                                   const APInt &BEs,
                                                   const Loop *L) {
  auto I = ConstantEvolutionLoopExitValue.find(PN);
  if (I != ConstantEvolutionLoopExitValue.end())
    return I->second;

  if (BEs.ugt(MaxBruteForceIterations))
    return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it.
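
  // Otherwise, simulate the loop one iteration at a time: seed every header
  // PHI with its constant start value, then repeatedly fold the backedge
  // values until iteration BEs is reached or the values stop changing.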
  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return nullptr;

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return RetVal = nullptr;

  Value *BEValue = PN->getIncomingValueForBlock(Latch);

  // Execute the loop symbolically to determine the exit value.
  assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) &&
         "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");

  unsigned NumIterations = BEs.getZExtValue(); // must be in range
  unsigned IterationNum = 0;
  const DataLayout &DL = getDataLayout();
  for (; ; ++IterationNum) {
    if (IterationNum == NumIterations)
      return RetVal = CurrentIterVals[PN];  // Got exit value!

    // Compute the value of the PHIs for the next iteration.
    // EvaluateExpression adds non-phi values to the CurrentIterVals map.
    DenseMap<Instruction *, Constant *> NextIterVals;
    Constant *NextPHI =
        EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    if (!NextPHI)
      return nullptr;  // Couldn't evaluate!
    NextIterVals[PN] = NextPHI;

    bool StoppedEvolving = NextPHI == CurrentIterVals[PN];

    // Also evaluate the other PHI nodes.  However, we don't get to stop if we
    // cease to be able to evaluate one of them or if they stop evolving,
    // because that doesn't necessarily prevent us from computing PN.
    SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
      PHIsToCompute.emplace_back(PHI, I.second);
    }
    // We use two distinct loops because EvaluateExpression may invalidate any
    // iterators into CurrentIterVals.
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) {   // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating; the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}

const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
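  // (one incoming value from the loop preheader and one from the latch)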
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations;
       ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute. We want to do this
    // before calling EvaluateExpression on them because that may invalidate
    // iterators into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (PHINode *PHI : PHIsToCompute) {
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue;    // Already computed!

      Value *BEValue = PHI->getIncomingValueForBlock(Latch);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
  return getCouldNotCompute();
}

const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
      ValuesAtScopes[V];
  // Check to see if we've folded this expression at this loop before.
  for (auto &LS : Values)
    if (LS.first == L)
      return LS.second ? LS.second : V;

  Values.emplace_back(L, nullptr);

  // Otherwise compute it.
  const SCEV *C = computeSCEVAtScope(V, L);
  for (auto &LS : reverse(ValuesAtScopes[V]))
    if (LS.first == L) {
      LS.second = C;
      break;
    }
  return C;
}

/// This builds up a Constant using the ConstantExpr interface.  That way, we
/// will return Constants for objects which aren't represented by a
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
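/// e.g. an add of a global variable's address and a constant offset becomes
/// a ConstantExpr GEP, while an AddRec has no closed Constant form and so
/// yields null here.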
static Constant *BuildConstantFromSCEV(const SCEV *V) {
  switch (V->getSCEVType()) {
  case scCouldNotCompute:
  case scAddRecExpr:
    return nullptr;
  case scConstant:
    return cast<SCEVConstant>(V)->getValue();
  case scUnknown:
    return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
  case scSignExtend: {
    const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
      return ConstantExpr::getSExt(CastOp, SS->getType());
    return nullptr;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
      return ConstantExpr::getZExt(CastOp, SZ->getType());
    return nullptr;
  }
  case scPtrToInt: {
    const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand()))
      return ConstantExpr::getPtrToInt(CastOp, P2I->getType());

    return nullptr;
  }
  case scTruncate: {
    const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
      return ConstantExpr::getTrunc(CastOp, ST->getType());
    return nullptr;
  }
  case scAddExpr: {
    const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
    if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
      if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
        unsigned AS = PTy->getAddressSpace();
        Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
        C = ConstantExpr::getBitCast(C, DestPtrTy);
      }
      for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
        Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
        if (!C2)
          return nullptr;

        // First pointer!
        if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
          unsigned AS = C2->getType()->getPointerAddressSpace();
          std::swap(C, C2);
          Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
          // The offsets have been converted to bytes.  We can add bytes to an
          // i8* by GEP with the byte count in the first index.
          C = ConstantExpr::getBitCast(C, DestPtrTy);
        }

        // Don't bother trying to sum two pointers. We probably can't
        // statically compute a load that results from it anyway.
        if (C2->getType()->isPointerTy())
          return nullptr;

        if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
          if (PTy->getElementType()->isStructTy())
            C2 = ConstantExpr::getIntegerCast(
                C2, Type::getInt32Ty(C->getContext()), true);
          C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
        } else
          C = ConstantExpr::getAdd(C, C2);
      }
      return C;
    }
    return nullptr;
  }
  case scMulExpr: {
    const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
    if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
      // Don't bother with pointers at all.
      if (C->getType()->isPointerTy())
        return nullptr;
      for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
        Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
        if (!C2 || C2->getType()->isPointerTy())
          return nullptr;
        C = ConstantExpr::getMul(C, C2);
      }
      return C;
    }
    return nullptr;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
    if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
      if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
        if (LHS->getType() == RHS->getType())
          return ConstantExpr::getUDiv(LHS, RHS);
    return nullptr;
  }
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr:
    return nullptr; // TODO: smax, umax, smin, umin.
  }
  llvm_unreachable("Unknown SCEV kind!");
}

const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      if (PHINode *PN = dyn_cast<PHINode>(I)) {
        const Loop *CurrLoop = this->LI[I->getParent()];
        // Looking for loop exit value.
        if (CurrLoop && CurrLoop->getParentLoop() == L &&
            PN->getParent() == CurrLoop->getHeader()) {
          // Okay, there is no closed form solution for the PHI node.  Check
          // to see if the loop that contains it has a known backedge-taken
          // count.  If so, we may be able to force computation of the exit
          // value.
          const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
          // This trivial case can show up in some degenerate cases where
          // the incoming IR has not yet been fully simplified.
          if (BackedgeTakenCount->isZero()) {
            Value *InitValue = nullptr;
            bool MultipleInitValues = false;
            for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
              if (!CurrLoop->contains(PN->getIncomingBlock(i))) {
                if (!InitValue)
                  InitValue = PN->getIncomingValue(i);
                else if (InitValue != PN->getIncomingValue(i)) {
                  MultipleInitValues = true;
                  break;
                }
              }
            }
            if (!MultipleInitValues && InitValue)
              return getSCEV(InitValue);
          }
          // Do we have a loop invariant value flowing around the backedge
          // for a loop which must execute the backedge?
          if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
              isKnownPositive(BackedgeTakenCount) &&
              PN->getNumIncomingValues() == 2) {

            unsigned InLoopPred =
                CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1;
            Value *BackedgeVal = PN->getIncomingValue(InLoopPred);
            if (CurrLoop->isLoopInvariant(BackedgeVal))
              return getSCEV(BackedgeVal);
          }
          if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
            // Okay, we know how many times the containing loop executes.  If
            // this is a constant evolving PHI node, get the final value at
            // the specified iteration number.
            Constant *RV = getConstantEvolutionLoopExitValue(
                PN, BTCC->getAPInt(), CurrLoop);
            if (RV) return getSCEV(RV);
          }
        }

        // If there is a single-input Phi, evaluate it at our scope. If we can
        // prove that this replacement does not break LCSSA form, use the new
        // value.
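        // (Such single-input phis are typically LCSSA phis, e.g.
        //   %lcssa = phi i64 [ %iv, %loop ]
        // and can be replaced once the exit value of %iv folds to a
        // constant.)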
        if (PN->getNumOperands() == 1) {
          const SCEV *Input = getSCEV(PN->getOperand(0));
          const SCEV *InputAtScope = getSCEVAtScope(Input, L);
          // TODO: We can generalize this using
          // LI.replacementPreservesLCSSAForm; for the simplest case just
          // support constants.
          if (isa<SCEVConstant>(InputAtScope)) return InputAtScope;
        }
      }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV.  Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result.  This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (Value *Op : I->operands()) {
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If any operand is non-constant and also non-integer and
          // non-pointer, don't even try to analyze it with SCEV techniques.
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
        if (MadeImprovement) {
          Constant *C = nullptr;
          const DataLayout &DL = getDataLayout();
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(),
                                                Operands[0], Operands[1],
                                                DL, &TLI);
          else if (const LoadInst *Load = dyn_cast<LoadInst>(I)) {
            if (!Load->isVolatile())
              C = ConstantFoldLoadFromConstPtr(Operands[0], Load->getType(),
                                               DL);
          } else
            C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable.  Build a new instance of the folded commutative
        // expression.
        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
                                            Comm->op_begin()+i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps, Comm->getNoWrapFlags());
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps, Comm->getNoWrapFlags());
        if (isa<SCEVMinMaxExpr>(Comm))
          return getMinMaxExpr(Comm->getSCEVType(), NewOps);
        llvm_unreachable("Unknown commutative SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
8938 return Comm;
8939 }
8940
8941 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
8942 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
8943 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
8944 if (LHS == Div->getLHS() && RHS == Div->getRHS())
8945 return Div; // must be loop invariant
8946 return getUDivExpr(LHS, RHS);
8947 }
8948
8949 // If this is a loop recurrence for a loop that does not contain L, then we
8950 // are dealing with the final value computed by the loop.
8951 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
8952 // First, attempt to evaluate each operand.
8953 // Avoid performing the look-up in the common case where the specified
8954 // expression has no loop-variant portions.
8955 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
8956 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
8957 if (OpAtScope == AddRec->getOperand(i))
8958 continue;
8959
8960 // Okay, at least one of these operands is loop variant but might be
8961 // foldable. Build a new instance of the folded addrec expression.
8962 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
8963 AddRec->op_begin()+i);
8964 NewOps.push_back(OpAtScope);
8965 for (++i; i != e; ++i)
8966 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
8967
8968 const SCEV *FoldedRec =
8969 getAddRecExpr(NewOps, AddRec->getLoop(),
8970 AddRec->getNoWrapFlags(SCEV::FlagNW));
8971 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
8972 // The addrec may be folded to a nonrecurrence, for example, if the
8973 // induction variable is multiplied by zero after constant folding. Go
8974 // ahead and return the folded value.
8975 if (!AddRec)
8976 return FoldedRec;
8977 break;
8978 }
8979
8980 // If the scope is outside the addrec's loop, evaluate it by using the
8981 // loop exit value of the addrec.
8982 if (!AddRec->getLoop()->contains(L)) {
8983 // To evaluate this recurrence, we need to know how many times the AddRec
8984 // loop iterates. Compute this now.
8985 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
8986 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
8987
8988 // Then, evaluate the AddRec.
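// Worked example (illustrative, not from the original source): the affine
// addrec {5,+,3}<%loop> with a backedge-taken count of 4 evaluates to
// 5 + 3*4 = 17, the value the recurrence holds after the final backedge.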
8989 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 8990 } 8991 8992 return AddRec; 8993 } 8994 8995 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 8996 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8997 if (Op == Cast->getOperand()) 8998 return Cast; // must be loop invariant 8999 return getZeroExtendExpr(Op, Cast->getType()); 9000 } 9001 9002 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 9003 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 9004 if (Op == Cast->getOperand()) 9005 return Cast; // must be loop invariant 9006 return getSignExtendExpr(Op, Cast->getType()); 9007 } 9008 9009 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 9010 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 9011 if (Op == Cast->getOperand()) 9012 return Cast; // must be loop invariant 9013 return getTruncateExpr(Op, Cast->getType()); 9014 } 9015 9016 if (const SCEVPtrToIntExpr *Cast = dyn_cast<SCEVPtrToIntExpr>(V)) { 9017 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 9018 if (Op == Cast->getOperand()) 9019 return Cast; // must be loop invariant 9020 return getPtrToIntExpr(Op, Cast->getType()); 9021 } 9022 9023 llvm_unreachable("Unknown SCEV type!"); 9024 } 9025 9026 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 9027 return getSCEVAtScope(getSCEV(V), L); 9028 } 9029 9030 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const { 9031 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) 9032 return stripInjectiveFunctions(ZExt->getOperand()); 9033 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) 9034 return stripInjectiveFunctions(SExt->getOperand()); 9035 return S; 9036 } 9037 9038 /// Finds the minimum unsigned root of the following equation: 9039 /// 9040 /// A * X = B (mod N) 9041 /// 9042 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 9043 /// A and B isn't important. 9044 /// 9045 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 9046 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 9047 ScalarEvolution &SE) { 9048 uint32_t BW = A.getBitWidth(); 9049 assert(BW == SE.getTypeSizeInBits(B->getType())); 9050 assert(A != 0 && "A must be non-zero."); 9051 9052 // 1. D = gcd(A, N) 9053 // 9054 // The gcd of A and N may have only one prime factor: 2. The number of 9055 // trailing zeros in A is its multiplicity 9056 uint32_t Mult2 = A.countTrailingZeros(); 9057 // D = 2^Mult2 9058 9059 // 2. Check if B is divisible by D. 9060 // 9061 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 9062 // is not less than multiplicity of this prime factor for D. 9063 if (SE.GetMinTrailingZeros(B) < Mult2) 9064 return SE.getCouldNotCompute(); 9065 9066 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 9067 // modulo (N / D). 9068 // 9069 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent 9070 // (N / D) in general. The inverse itself always fits into BW bits, though, 9071 // so we immediately truncate it. 9072 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 9073 APInt Mod(BW + 1, 0); 9074 Mod.setBit(BW - Mult2); // Mod = N / D 9075 APInt I = AD.multiplicativeInverse(Mod).trunc(BW); 9076 9077 // 4. 
Compute the minimum unsigned root of the equation: 9078 // I * (B / D) mod (N / D) 9079 // To simplify the computation, we factor out the divide by D: 9080 // (I * B mod N) / D 9081 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2)); 9082 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D); 9083 } 9084 9085 /// For a given quadratic addrec, generate coefficients of the corresponding 9086 /// quadratic equation, multiplied by a common value to ensure that they are 9087 /// integers. 9088 /// The returned value is a tuple { A, B, C, M, BitWidth }, where 9089 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C 9090 /// were multiplied by, and BitWidth is the bit width of the original addrec 9091 /// coefficients. 9092 /// This function returns None if the addrec coefficients are not compile- 9093 /// time constants. 9094 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>> 9095 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) { 9096 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 9097 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 9098 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 9099 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 9100 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: " 9101 << *AddRec << '\n'); 9102 9103 // We currently can only solve this if the coefficients are constants. 9104 if (!LC || !MC || !NC) { 9105 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n"); 9106 return None; 9107 } 9108 9109 APInt L = LC->getAPInt(); 9110 APInt M = MC->getAPInt(); 9111 APInt N = NC->getAPInt(); 9112 assert(!N.isNullValue() && "This is not a quadratic addrec"); 9113 9114 unsigned BitWidth = LC->getAPInt().getBitWidth(); 9115 unsigned NewWidth = BitWidth + 1; 9116 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: " 9117 << BitWidth << '\n'); 9118 // The sign-extension (as opposed to a zero-extension) here matches the 9119 // extension used in SolveQuadraticEquationWrap (with the same motivation). 9120 N = N.sext(NewWidth); 9121 M = M.sext(NewWidth); 9122 L = L.sext(NewWidth); 9123 9124 // The increments are M, M+N, M+2N, ..., so the accumulated values are 9125 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, 9126 // L+M, L+2M+N, L+3M+3N, ... 9127 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. 9128 // 9129 // The equation Acc = 0 is then 9130 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. 9131 // In a quadratic form it becomes: 9132 // N n^2 + (2M-N) n + 2L = 0. 9133 9134 APInt A = N; 9135 APInt B = 2 * M - A; 9136 APInt C = 2 * L; 9137 APInt T = APInt(NewWidth, 2); 9138 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B 9139 << "x + " << C << ", coeff bw: " << NewWidth 9140 << ", multiplied by " << T << '\n'); 9141 return std::make_tuple(A, B, C, T, BitWidth); 9142 } 9143 9144 /// Helper function to compare optional APInts: 9145 /// (a) if X and Y both exist, return min(X, Y), 9146 /// (b) if neither X nor Y exist, return None, 9147 /// (c) if exactly one of X and Y exists, return that value. 9148 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) { 9149 if (X.hasValue() && Y.hasValue()) { 9150 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth()); 9151 APInt XW = X->sextOrSelf(W); 9152 APInt YW = Y->sextOrSelf(W); 9153 return XW.slt(YW) ? 
*X : *Y; 9154 } 9155 if (!X.hasValue() && !Y.hasValue()) 9156 return None; 9157 return X.hasValue() ? *X : *Y; 9158 } 9159 9160 /// Helper function to truncate an optional APInt to a given BitWidth. 9161 /// When solving addrec-related equations, it is preferable to return a value 9162 /// that has the same bit width as the original addrec's coefficients. If the 9163 /// solution fits in the original bit width, truncate it (except for i1). 9164 /// Returning a value of a different bit width may inhibit some optimizations. 9165 /// 9166 /// In general, a solution to a quadratic equation generated from an addrec 9167 /// may require BW+1 bits, where BW is the bit width of the addrec's 9168 /// coefficients. The reason is that the coefficients of the quadratic 9169 /// equation are BW+1 bits wide (to avoid truncation when converting from 9170 /// the addrec to the equation). 9171 static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) { 9172 if (!X.hasValue()) 9173 return None; 9174 unsigned W = X->getBitWidth(); 9175 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth)) 9176 return X->trunc(BitWidth); 9177 return X; 9178 } 9179 9180 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n 9181 /// iterations. The values L, M, N are assumed to be signed, and they 9182 /// should all have the same bit widths. 9183 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW, 9184 /// where BW is the bit width of the addrec's coefficients. 9185 /// If the calculated value is a BW-bit integer (for BW > 1), it will be 9186 /// returned as such, otherwise the bit width of the returned value may 9187 /// be greater than BW. 9188 /// 9189 /// This function returns None if 9190 /// (a) the addrec coefficients are not constant, or 9191 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases 9192 /// like x^2 = 5, no integer solutions exist, in other cases an integer 9193 /// solution may exist, but SolveQuadraticEquationWrap may fail to find it. 9194 static Optional<APInt> 9195 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 9196 APInt A, B, C, M; 9197 unsigned BitWidth; 9198 auto T = GetQuadraticEquation(AddRec); 9199 if (!T.hasValue()) 9200 return None; 9201 9202 std::tie(A, B, C, M, BitWidth) = *T; 9203 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n"); 9204 Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1); 9205 if (!X.hasValue()) 9206 return None; 9207 9208 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X); 9209 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE); 9210 if (!V->isZero()) 9211 return None; 9212 9213 return TruncIfPossible(X, BitWidth); 9214 } 9215 9216 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n 9217 /// iterations. The values M, N are assumed to be signed, and they 9218 /// should all have the same bit widths. 9219 /// Find the least n such that c(n) does not belong to the given range, 9220 /// while c(n-1) does. 9221 /// 9222 /// This function returns None if 9223 /// (a) the addrec coefficients are not constant, or 9224 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the 9225 /// bounds of the range. 
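///
/// Illustrative example (not from the original source): for the chrec
/// {0,+,1,+,1}, the value after n iterations is n(n+1)/2, i.e. the sequence
/// 0, 1, 3, 6, 10, ... For the range [0, 10) the answer is 4, since
/// c(4) = 10 is the first value outside the range while c(3) = 6 is still
/// inside it.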
9226 static Optional<APInt> 9227 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, 9228 const ConstantRange &Range, ScalarEvolution &SE) { 9229 assert(AddRec->getOperand(0)->isZero() && 9230 "Starting value of addrec should be 0"); 9231 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range " 9232 << Range << ", addrec " << *AddRec << '\n'); 9233 // This case is handled in getNumIterationsInRange. Here we can assume that 9234 // we start in the range. 9235 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) && 9236 "Addrec's initial value should be in range"); 9237 9238 APInt A, B, C, M; 9239 unsigned BitWidth; 9240 auto T = GetQuadraticEquation(AddRec); 9241 if (!T.hasValue()) 9242 return None; 9243 9244 // Be careful about the return value: there can be two reasons for not 9245 // returning an actual number. First, if no solutions to the equations 9246 // were found, and second, if the solutions don't leave the given range. 9247 // The first case means that the actual solution is "unknown", the second 9248 // means that it's known, but not valid. If the solution is unknown, we 9249 // cannot make any conclusions. 9250 // Return a pair: the optional solution and a flag indicating if the 9251 // solution was found. 9252 auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> { 9253 // Solve for signed overflow and unsigned overflow, pick the lower 9254 // solution. 9255 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary " 9256 << Bound << " (before multiplying by " << M << ")\n"); 9257 Bound *= M; // The quadratic equation multiplier. 9258 9259 Optional<APInt> SO = None; 9260 if (BitWidth > 1) { 9261 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 9262 "signed overflow\n"); 9263 SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth); 9264 } 9265 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 9266 "unsigned overflow\n"); 9267 Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, 9268 BitWidth+1); 9269 9270 auto LeavesRange = [&] (const APInt &X) { 9271 ConstantInt *C0 = ConstantInt::get(SE.getContext(), X); 9272 ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE); 9273 if (Range.contains(V0->getValue())) 9274 return false; 9275 // X should be at least 1, so X-1 is non-negative. 9276 ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1); 9277 ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE); 9278 if (Range.contains(V1->getValue())) 9279 return true; 9280 return false; 9281 }; 9282 9283 // If SolveQuadraticEquationWrap returns None, it means that there can 9284 // be a solution, but the function failed to find it. We cannot treat it 9285 // as "no solution". 9286 if (!SO.hasValue() || !UO.hasValue()) 9287 return { None, false }; 9288 9289 // Check the smaller value first to see if it leaves the range. 9290 // At this point, both SO and UO must have values. 9291 Optional<APInt> Min = MinOptional(SO, UO); 9292 if (LeavesRange(*Min)) 9293 return { Min, true }; 9294 Optional<APInt> Max = Min == SO ? UO : SO; 9295 if (LeavesRange(*Max)) 9296 return { Max, true }; 9297 9298 // Solutions were found, but were eliminated, hence the "true". 9299 return { None, true }; 9300 }; 9301 9302 std::tie(A, B, C, M, BitWidth) = *T; 9303 // Lower bound is inclusive, subtract 1 to represent the exiting value. 
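// E.g. (illustrative) for Range = [L, U): a value crosses the range going
// downward exactly when it reaches L - 1, and going upward when it reaches
// U, so those are the two boundary values solved for below.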
9304 APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
9305 APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
9306 auto SL = SolveForBoundary(Lower);
9307 auto SU = SolveForBoundary(Upper);
9308 // If any of the solutions was unknown, no meaningful conclusions can
9309 // be made.
9310 if (!SL.second || !SU.second)
9311 return None;
9312
9313 // Claim: The correct solution is not some value between Min and Max.
9314 //
9315 // Justification: Assuming that Min and Max are different values, one of
9316 // them is when the first signed overflow happens, the other is when the
9317 // first unsigned overflow happens. Crossing the range boundary is only
9318 // possible via an overflow (treating 0 as a special case of it, modeling
9319 // an overflow as crossing k*2^W for some k).
9320 //
9321 // The interesting case here is when Min was eliminated as an invalid
9322 // solution, but Max was not. The argument is that if there was another
9323 // overflow between Min and Max, it would also have been eliminated if
9324 // it was considered.
9325 //
9326 // For a given boundary, it is possible to have two overflows of the same
9327 // type (signed/unsigned) without having the other type in between: this
9328 // can happen when the vertex of the parabola is between the iterations
9329 // corresponding to the overflows. This is only possible when the two
9330 // overflows cross k*2^W for the same k. In such a case, if the second one
9331 // left the range (and was the first one to do so), the first overflow
9332 // would have to enter the range, which would mean that either we had left
9333 // the range before or that we started outside of it. Both of these cases
9334 // are contradictions.
9335 //
9336 // Claim: In the case where SolveForBoundary returns None, the correct
9337 // solution is not some value between the Max for this boundary and the
9338 // Min of the other boundary.
9339 //
9340 // Justification: Assume that we had such Max_A and Min_B corresponding
9341 // to range boundaries A and B and such that Max_A < Min_B. If there was
9342 // a solution between Max_A and Min_B, it would have to be caused by an
9343 // overflow corresponding to either A or B. It cannot correspond to B,
9344 // since Min_B is the first occurrence of such an overflow. If it
9345 // corresponded to A, it would have to be either a signed or an unsigned
9346 // overflow that is larger than both eliminated overflows for A. But
9347 // between the eliminated overflows and this overflow, the values would
9348 // cover the entire value space, thus crossing the other boundary, which
9349 // is a contradiction.
9350
9351 return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
9352 }
9353
9354 ScalarEvolution::ExitLimit
9355 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
9356 bool AllowPredicates) {
9357
9358 // This is only used for loops with an "x != y" exit test. The exit condition
9359 // is now expressed as a single expression, V = x-y. So the exit test is
9360 // effectively V != 0. We know, and take advantage of, the fact that this
9361 // expression is only used in a comparison-with-zero context.
9362
9363 SmallPtrSet<const SCEVPredicate *, 4> Predicates;
9364 // If the value is a constant:
9365 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
9366 // If the value is already zero, the branch will execute zero times.
9367 if (C->getValue()->isZero()) return C;
9368 return getCouldNotCompute(); // Otherwise it will loop infinitely.
9369 }
9370
9371 const SCEVAddRecExpr *AddRec =
9372 dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));
9373
9374 if (!AddRec && AllowPredicates)
9375 // Try to make this an AddRec using runtime tests, in the first X
9376 // iterations of this loop, where X is the SCEV expression found by the
9377 // algorithm below.
9378 AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);
9379
9380 if (!AddRec || AddRec->getLoop() != L)
9381 return getCouldNotCompute();
9382
9383 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
9384 // the quadratic equation to solve it.
9385 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
9386 // We can only use this value if the chrec ends up with an exact zero
9387 // value at this index. When solving for "X*X != 5", for example, we
9388 // should not accept a root of 2.
9389 if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
9390 const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
9391 return ExitLimit(R, R, false, Predicates);
9392 }
9393 return getCouldNotCompute();
9394 }
9395
9396 // Otherwise we can only handle this if it is affine.
9397 if (!AddRec->isAffine())
9398 return getCouldNotCompute();
9399
9400 // If this is an affine expression, the execution count of this branch is
9401 // the minimum unsigned root of the following equation:
9402 //
9403 // Start + Step*N = 0 (mod 2^BW)
9404 //
9405 // equivalent to:
9406 //
9407 // Step*N = -Start (mod 2^BW)
9408 //
9409 // where BW is the common bit width of Start and Step.
9410
9411 // Get the initial value for the loop.
9412 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
9413 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
9414
9415 // For now we handle only constant steps.
9416 //
9417 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
9418 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
9419 // to 0; it must be counting down to equal 0. Consequently, N = Start / -Step.
9420 // We have not yet seen any such cases.
9421 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
9422 if (!StepC || StepC->getValue()->isZero())
9423 return getCouldNotCompute();
9424
9425 // For positive steps (counting up until unsigned overflow):
9426 // N = -Start/Step (as unsigned)
9427 // For negative steps (counting down to zero):
9428 // N = Start/-Step
9429 // First compute the unsigned distance from zero in the direction of Step.
9430 bool CountDown = StepC->getAPInt().isNegative();
9431 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
9432
9433 // Handle unitary steps, which cannot wrap around.
9434 // 1*N = -Start; -1*N = Start (mod 2^BW), so:
9435 // N = Distance (as unsigned)
9436 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
9437 APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L));
9438 APInt MaxBECountBase = getUnsignedRangeMax(Distance);
9439 if (MaxBECountBase.ult(MaxBECount))
9440 MaxBECount = MaxBECountBase;
9441
9442 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated,
9443 // we end up with a loop whose backedge-taken count is n - 1. Detect this
9444 // case, and see if we can improve the bound.
9445 //
9446 // Explicitly handling this here is necessary because getUnsignedRange
9447 // isn't context-sensitive; it doesn't know that we only care about the
9448 // range inside the loop.
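// Illustration (assuming a rotated "i != n" loop): the backedge-taken count
// is n - 1, so Distance = n - 1. The entry guard proves Distance + 1 != 0,
// which lets the bound below be tightened to unsigned_max(n) - 1.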
9449 const SCEV *Zero = getZero(Distance->getType()); 9450 const SCEV *One = getOne(Distance->getType()); 9451 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 9452 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 9453 // If Distance + 1 doesn't overflow, we can compute the maximum distance 9454 // as "unsigned_max(Distance + 1) - 1". 9455 ConstantRange CR = getUnsignedRange(DistancePlusOne); 9456 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 9457 } 9458 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); 9459 } 9460 9461 // If the condition controls loop exit (the loop exits only if the expression 9462 // is true) and the addition is no-wrap we can use unsigned divide to 9463 // compute the backedge count. In this case, the step may not divide the 9464 // distance, but we don't care because if the condition is "missed" the loop 9465 // will have undefined behavior due to wrapping. 9466 if (ControlsExit && AddRec->hasNoSelfWrap() && 9467 loopHasNoAbnormalExits(AddRec->getLoop())) { 9468 const SCEV *Exact = 9469 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 9470 const SCEV *Max = getCouldNotCompute(); 9471 if (Exact != getCouldNotCompute()) { 9472 APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, L)); 9473 APInt BaseMaxInt = getUnsignedRangeMax(Exact); 9474 if (BaseMaxInt.ult(MaxInt)) 9475 Max = getConstant(BaseMaxInt); 9476 else 9477 Max = getConstant(MaxInt); 9478 } 9479 return ExitLimit(Exact, Max, false, Predicates); 9480 } 9481 9482 // Solve the general equation. 9483 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 9484 getNegativeSCEV(Start), *this); 9485 const SCEV *M = E == getCouldNotCompute() 9486 ? E 9487 : getConstant(getUnsignedRangeMax(E)); 9488 return ExitLimit(E, M, false, Predicates); 9489 } 9490 9491 ScalarEvolution::ExitLimit 9492 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 9493 // Loops that look like: while (X == 0) are very strange indeed. We don't 9494 // handle them yet except for the trivial case. This could be expanded in the 9495 // future as needed. 9496 9497 // If the value is a constant, check to see if it is known to be non-zero 9498 // already. If so, the backedge will execute zero times. 9499 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 9500 if (!C->getValue()->isZero()) 9501 return getZero(C->getType()); 9502 return getCouldNotCompute(); // Otherwise it will loop infinitely. 9503 } 9504 9505 // We could implement others, but I really doubt anyone writes loops like 9506 // this, and if they did, they would already be constant folded. 9507 return getCouldNotCompute(); 9508 } 9509 9510 std::pair<const BasicBlock *, const BasicBlock *> 9511 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB) 9512 const { 9513 // If the block has a unique predecessor, then there is no path from the 9514 // predecessor to the block that does not go through the direct edge 9515 // from the predecessor to the block. 9516 if (const BasicBlock *Pred = BB->getSinglePredecessor()) 9517 return {Pred, BB}; 9518 9519 // A loop's header is defined to be a block that dominates the loop. 9520 // If the header has a unique predecessor outside the loop, it must be 9521 // a block that has exactly one successor that can reach the loop. 
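// For example (illustrative CFG): a loop preheader is such a block; it is
// the unique out-of-loop predecessor of the header, and every path into the
// loop goes through its single edge to the header.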
9522 if (const Loop *L = LI.getLoopFor(BB)) 9523 return {L->getLoopPredecessor(), L->getHeader()}; 9524 9525 return {nullptr, nullptr}; 9526 } 9527 9528 /// SCEV structural equivalence is usually sufficient for testing whether two 9529 /// expressions are equal, however for the purposes of looking for a condition 9530 /// guarding a loop, it can be useful to be a little more general, since a 9531 /// front-end may have replicated the controlling expression. 9532 static bool HasSameValue(const SCEV *A, const SCEV *B) { 9533 // Quick check to see if they are the same SCEV. 9534 if (A == B) return true; 9535 9536 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 9537 // Not all instructions that are "identical" compute the same value. For 9538 // instance, two distinct alloca instructions allocating the same type are 9539 // identical and do not read memory; but compute distinct values. 9540 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A)); 9541 }; 9542 9543 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 9544 // two different instructions with the same value. Check for this case. 9545 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 9546 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 9547 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 9548 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 9549 if (ComputesEqualValues(AI, BI)) 9550 return true; 9551 9552 // Otherwise assume they may have a different value. 9553 return false; 9554 } 9555 9556 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 9557 const SCEV *&LHS, const SCEV *&RHS, 9558 unsigned Depth) { 9559 bool Changed = false; 9560 // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or 9561 // '0 != 0'. 9562 auto TrivialCase = [&](bool TriviallyTrue) { 9563 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 9564 Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE; 9565 return true; 9566 }; 9567 // If we hit the max recursion limit bail out. 9568 if (Depth >= 3) 9569 return false; 9570 9571 // Canonicalize a constant to the right side. 9572 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 9573 // Check for both operands constant. 9574 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 9575 if (ConstantExpr::getICmp(Pred, 9576 LHSC->getValue(), 9577 RHSC->getValue())->isNullValue()) 9578 return TrivialCase(false); 9579 else 9580 return TrivialCase(true); 9581 } 9582 // Otherwise swap the operands to put the constant on the right. 9583 std::swap(LHS, RHS); 9584 Pred = ICmpInst::getSwappedPredicate(Pred); 9585 Changed = true; 9586 } 9587 9588 // If we're comparing an addrec with a value which is loop-invariant in the 9589 // addrec's loop, put the addrec on the left. Also make a dominance check, 9590 // as both operands could be addrecs loop-invariant in each other's loop. 9591 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 9592 const Loop *L = AR->getLoop(); 9593 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { 9594 std::swap(LHS, RHS); 9595 Pred = ICmpInst::getSwappedPredicate(Pred); 9596 Changed = true; 9597 } 9598 } 9599 9600 // If there's a constant operand, canonicalize comparisons with boundary 9601 // cases, and canonicalize *-or-equal comparisons to regular comparisons. 
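// Two illustrative rewrites performed below: "X u< 1" has the exact range
// {0}, so it becomes "X == 0"; and "X u<= 5" becomes "X u< 6".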
9602 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 9603 const APInt &RA = RC->getAPInt(); 9604 9605 bool SimplifiedByConstantRange = false; 9606 9607 if (!ICmpInst::isEquality(Pred)) { 9608 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); 9609 if (ExactCR.isFullSet()) 9610 return TrivialCase(true); 9611 else if (ExactCR.isEmptySet()) 9612 return TrivialCase(false); 9613 9614 APInt NewRHS; 9615 CmpInst::Predicate NewPred; 9616 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && 9617 ICmpInst::isEquality(NewPred)) { 9618 // We were able to convert an inequality to an equality. 9619 Pred = NewPred; 9620 RHS = getConstant(NewRHS); 9621 Changed = SimplifiedByConstantRange = true; 9622 } 9623 } 9624 9625 if (!SimplifiedByConstantRange) { 9626 switch (Pred) { 9627 default: 9628 break; 9629 case ICmpInst::ICMP_EQ: 9630 case ICmpInst::ICMP_NE: 9631 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. 9632 if (!RA) 9633 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 9634 if (const SCEVMulExpr *ME = 9635 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 9636 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 9637 ME->getOperand(0)->isAllOnesValue()) { 9638 RHS = AE->getOperand(1); 9639 LHS = ME->getOperand(1); 9640 Changed = true; 9641 } 9642 break; 9643 9644 9645 // The "Should have been caught earlier!" messages refer to the fact 9646 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 9647 // should have fired on the corresponding cases, and canonicalized the 9648 // check to trivial case. 9649 9650 case ICmpInst::ICMP_UGE: 9651 assert(!RA.isMinValue() && "Should have been caught earlier!"); 9652 Pred = ICmpInst::ICMP_UGT; 9653 RHS = getConstant(RA - 1); 9654 Changed = true; 9655 break; 9656 case ICmpInst::ICMP_ULE: 9657 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 9658 Pred = ICmpInst::ICMP_ULT; 9659 RHS = getConstant(RA + 1); 9660 Changed = true; 9661 break; 9662 case ICmpInst::ICMP_SGE: 9663 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 9664 Pred = ICmpInst::ICMP_SGT; 9665 RHS = getConstant(RA - 1); 9666 Changed = true; 9667 break; 9668 case ICmpInst::ICMP_SLE: 9669 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 9670 Pred = ICmpInst::ICMP_SLT; 9671 RHS = getConstant(RA + 1); 9672 Changed = true; 9673 break; 9674 } 9675 } 9676 } 9677 9678 // Check for obvious equality. 9679 if (HasSameValue(LHS, RHS)) { 9680 if (ICmpInst::isTrueWhenEqual(Pred)) 9681 return TrivialCase(true); 9682 if (ICmpInst::isFalseWhenEqual(Pred)) 9683 return TrivialCase(false); 9684 } 9685 9686 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 9687 // adding or subtracting 1 from one of the operands. 
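// E.g. "X s<= Y" becomes "X s< Y + 1" when Y + 1 provably cannot overflow
// (Y's signed maximum is not SINT_MAX); failing that, we instead try
// "X - 1 s< Y" when X - 1 cannot underflow.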
9688 switch (Pred) { 9689 case ICmpInst::ICMP_SLE: 9690 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 9691 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 9692 SCEV::FlagNSW); 9693 Pred = ICmpInst::ICMP_SLT; 9694 Changed = true; 9695 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 9696 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 9697 SCEV::FlagNSW); 9698 Pred = ICmpInst::ICMP_SLT; 9699 Changed = true; 9700 } 9701 break; 9702 case ICmpInst::ICMP_SGE: 9703 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 9704 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 9705 SCEV::FlagNSW); 9706 Pred = ICmpInst::ICMP_SGT; 9707 Changed = true; 9708 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 9709 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 9710 SCEV::FlagNSW); 9711 Pred = ICmpInst::ICMP_SGT; 9712 Changed = true; 9713 } 9714 break; 9715 case ICmpInst::ICMP_ULE: 9716 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 9717 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 9718 SCEV::FlagNUW); 9719 Pred = ICmpInst::ICMP_ULT; 9720 Changed = true; 9721 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 9722 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 9723 Pred = ICmpInst::ICMP_ULT; 9724 Changed = true; 9725 } 9726 break; 9727 case ICmpInst::ICMP_UGE: 9728 if (!getUnsignedRangeMin(RHS).isMinValue()) { 9729 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 9730 Pred = ICmpInst::ICMP_UGT; 9731 Changed = true; 9732 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 9733 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 9734 SCEV::FlagNUW); 9735 Pred = ICmpInst::ICMP_UGT; 9736 Changed = true; 9737 } 9738 break; 9739 default: 9740 break; 9741 } 9742 9743 // TODO: More simplifications are possible here. 9744 9745 // Recursively simplify until we either hit a recursion limit or nothing 9746 // changes. 9747 if (Changed) 9748 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 9749 9750 return Changed; 9751 } 9752 9753 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 9754 return getSignedRangeMax(S).isNegative(); 9755 } 9756 9757 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 9758 return getSignedRangeMin(S).isStrictlyPositive(); 9759 } 9760 9761 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 9762 return !getSignedRangeMin(S).isNegative(); 9763 } 9764 9765 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 9766 return !getSignedRangeMax(S).isStrictlyPositive(); 9767 } 9768 9769 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 9770 return isKnownNegative(S) || isKnownPositive(S); 9771 } 9772 9773 std::pair<const SCEV *, const SCEV *> 9774 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { 9775 // Compute SCEV on entry of loop L. 9776 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); 9777 if (Start == getCouldNotCompute()) 9778 return { Start, Start }; 9779 // Compute post increment SCEV for loop L. 9780 const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this); 9781 assert(PostInc != getCouldNotCompute() && "Unexpected could not compute"); 9782 return { Start, PostInc }; 9783 } 9784 9785 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred, 9786 const SCEV *LHS, const SCEV *RHS) { 9787 // First collect all loops. 
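// The proof strategy, sketched: among the loops used by LHS and RHS, pick
// the most deeply dominated one (MDL), split both sides into their value on
// entry to MDL and their value after its backedge, and prove the predicate
// for both; together these imply the predicate on every iteration by
// induction.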
9788 SmallPtrSet<const Loop *, 8> LoopsUsed;
9789 getUsedLoops(LHS, LoopsUsed);
9790 getUsedLoops(RHS, LoopsUsed);
9791
9792 if (LoopsUsed.empty())
9793 return false;
9794
9795 // Domination relationship must be a linear order on collected loops.
9796 #ifndef NDEBUG
9797 for (auto *L1 : LoopsUsed)
9798 for (auto *L2 : LoopsUsed)
9799 assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
9800 DT.dominates(L2->getHeader(), L1->getHeader())) &&
9801 "Domination relationship is not a linear order");
9802 #endif
9803
9804 const Loop *MDL =
9805 *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
9806 [&](const Loop *L1, const Loop *L2) {
9807 return DT.properlyDominates(L1->getHeader(), L2->getHeader());
9808 });
9809
9810 // Get the init and post-increment values for LHS.
9811 auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
9812 // If LHS contains an unknown non-invariant SCEV then bail out.
9813 if (SplitLHS.first == getCouldNotCompute())
9814 return false;
9815 assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
9816 // Get the init and post-increment values for RHS.
9817 auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
9818 // If RHS contains an unknown non-invariant SCEV then bail out.
9819 if (SplitRHS.first == getCouldNotCompute())
9820 return false;
9821 assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
9822 // It is possible that the init SCEV contains an invariant load that does
9823 // not dominate MDL and is not available at MDL's loop entry, so we must
9824 // check for that here.
9825 if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
9826 !isAvailableAtLoopEntry(SplitRHS.first, MDL))
9827 return false;
9828
9829 // The backedge guard check appears to be faster than the entry check, so
9830 // checking it first can short-circuit and speed up the whole estimation.
9831 return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
9832 SplitRHS.second) &&
9833 isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
9834 }
9835
9836 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
9837 const SCEV *LHS, const SCEV *RHS) {
9838 // Canonicalize the inputs first.
9839 (void)SimplifyICmpOperands(Pred, LHS, RHS);
9840
9841 if (isKnownViaInduction(Pred, LHS, RHS))
9842 return true;
9843
9844 if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
9845 return true;
9846
9847 // Otherwise see what can be done with some simple reasoning.
9848 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
9849 }
9850
9851 Optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred,
9852 const SCEV *LHS,
9853 const SCEV *RHS) {
9854 if (isKnownPredicate(Pred, LHS, RHS))
9855 return true;
9856 else if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS))
9857 return false;
9858 return None;
9859 }
9860
9861 bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
9862 const SCEV *LHS, const SCEV *RHS,
9863 const Instruction *Context) {
9864 // TODO: Analyze guards and assumes from Context's block.
9865 return isKnownPredicate(Pred, LHS, RHS) || 9866 isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS); 9867 } 9868 9869 Optional<bool> 9870 ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS, 9871 const SCEV *RHS, 9872 const Instruction *Context) { 9873 Optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS); 9874 if (KnownWithoutContext) 9875 return KnownWithoutContext; 9876 9877 if (isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS)) 9878 return true; 9879 else if (isBasicBlockEntryGuardedByCond(Context->getParent(), 9880 ICmpInst::getInversePredicate(Pred), 9881 LHS, RHS)) 9882 return false; 9883 return None; 9884 } 9885 9886 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred, 9887 const SCEVAddRecExpr *LHS, 9888 const SCEV *RHS) { 9889 const Loop *L = LHS->getLoop(); 9890 return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) && 9891 isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS); 9892 } 9893 9894 Optional<ScalarEvolution::MonotonicPredicateType> 9895 ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS, 9896 ICmpInst::Predicate Pred) { 9897 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred); 9898 9899 #ifndef NDEBUG 9900 // Verify an invariant: inverting the predicate should turn a monotonically 9901 // increasing change to a monotonically decreasing one, and vice versa. 9902 if (Result) { 9903 auto ResultSwapped = 9904 getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred)); 9905 9906 assert(ResultSwapped.hasValue() && "should be able to analyze both!"); 9907 assert(ResultSwapped.getValue() != Result.getValue() && 9908 "monotonicity should flip as we flip the predicate"); 9909 } 9910 #endif 9911 9912 return Result; 9913 } 9914 9915 Optional<ScalarEvolution::MonotonicPredicateType> 9916 ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS, 9917 ICmpInst::Predicate Pred) { 9918 // A zero step value for LHS means the induction variable is essentially a 9919 // loop invariant value. We don't really depend on the predicate actually 9920 // flipping from false to true (for increasing predicates, and the other way 9921 // around for decreasing predicates), all we care about is that *if* the 9922 // predicate changes then it only changes from false to true. 9923 // 9924 // A zero step value in itself is not very useful, but there may be places 9925 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 9926 // as general as possible. 9927 9928 // Only handle LE/LT/GE/GT predicates. 9929 if (!ICmpInst::isRelational(Pred)) 9930 return None; 9931 9932 bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred); 9933 assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) && 9934 "Should be greater or less!"); 9935 9936 // Check that AR does not wrap. 9937 if (ICmpInst::isUnsigned(Pred)) { 9938 if (!LHS->hasNoUnsignedWrap()) 9939 return None; 9940 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; 9941 } else { 9942 assert(ICmpInst::isSigned(Pred) && 9943 "Relational predicate is either signed or unsigned!"); 9944 if (!LHS->hasNoSignedWrap()) 9945 return None; 9946 9947 const SCEV *Step = LHS->getStepRecurrence(*this); 9948 9949 if (isKnownNonNegative(Step)) 9950 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; 9951 9952 if (isKnownNonPositive(Step)) 9953 return !IsGreater ? 
MonotonicallyIncreasing : MonotonicallyDecreasing; 9954 9955 return None; 9956 } 9957 } 9958 9959 Optional<ScalarEvolution::LoopInvariantPredicate> 9960 ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred, 9961 const SCEV *LHS, const SCEV *RHS, 9962 const Loop *L) { 9963 9964 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 9965 if (!isLoopInvariant(RHS, L)) { 9966 if (!isLoopInvariant(LHS, L)) 9967 return None; 9968 9969 std::swap(LHS, RHS); 9970 Pred = ICmpInst::getSwappedPredicate(Pred); 9971 } 9972 9973 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9974 if (!ArLHS || ArLHS->getLoop() != L) 9975 return None; 9976 9977 auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred); 9978 if (!MonotonicType) 9979 return None; 9980 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 9981 // true as the loop iterates, and the backedge is control dependent on 9982 // "ArLHS `Pred` RHS" == true then we can reason as follows: 9983 // 9984 // * if the predicate was false in the first iteration then the predicate 9985 // is never evaluated again, since the loop exits without taking the 9986 // backedge. 9987 // * if the predicate was true in the first iteration then it will 9988 // continue to be true for all future iterations since it is 9989 // monotonically increasing. 9990 // 9991 // For both the above possibilities, we can replace the loop varying 9992 // predicate with its value on the first iteration of the loop (which is 9993 // loop invariant). 9994 // 9995 // A similar reasoning applies for a monotonically decreasing predicate, by 9996 // replacing true with false and false with true in the above two bullets. 9997 bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing; 9998 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 9999 10000 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 10001 return None; 10002 10003 return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), RHS); 10004 } 10005 10006 Optional<ScalarEvolution::LoopInvariantPredicate> 10007 ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations( 10008 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 10009 const Instruction *Context, const SCEV *MaxIter) { 10010 // Try to prove the following set of facts: 10011 // - The predicate is monotonic in the iteration space. 10012 // - If the check does not fail on the 1st iteration: 10013 // - No overflow will happen during first MaxIter iterations; 10014 // - It will not fail on the MaxIter'th iteration. 10015 // If the check does fail on the 1st iteration, we leave the loop and no 10016 // other checks matter. 10017 10018 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 10019 if (!isLoopInvariant(RHS, L)) { 10020 if (!isLoopInvariant(LHS, L)) 10021 return None; 10022 10023 std::swap(LHS, RHS); 10024 Pred = ICmpInst::getSwappedPredicate(Pred); 10025 } 10026 10027 auto *AR = dyn_cast<SCEVAddRecExpr>(LHS); 10028 if (!AR || AR->getLoop() != L) 10029 return None; 10030 10031 // The predicate must be relational (i.e. <, <=, >=, >). 10032 if (!ICmpInst::isRelational(Pred)) 10033 return None; 10034 10035 // TODO: Support steps other than +/- 1. 
10036 const SCEV *Step = AR->getStepRecurrence(*this);
10037 auto *One = getOne(Step->getType());
10038 auto *MinusOne = getNegativeSCEV(One);
10039 if (Step != One && Step != MinusOne)
10040 return None;
10041
10042 // A type mismatch here means that MaxIter is potentially larger than the
10043 // max unsigned value of the start type, which means we cannot prove
10044 // no-wrap for the indvar.
10045 if (AR->getType() != MaxIter->getType())
10046 return None;
10047
10048 // Value of IV on suggested last iteration.
10049 const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this);
10050 // Does it still meet the requirement?
10051 if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS))
10052 return None;
10053 // Because the step is +/- 1 and MaxIter has the same type as Start (i.e. it
10054 // does not exceed the max unsigned value of this type), this effectively
10055 // proves that there is no wrap during the iteration. To prove that there is
10056 // no signed/unsigned wrap, we need to check that
10057 // Start <= Last for step = 1 or Start >= Last for step = -1.
10058 ICmpInst::Predicate NoOverflowPred =
10059 CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
10060 if (Step == MinusOne)
10061 NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred);
10062 const SCEV *Start = AR->getStart();
10063 if (!isKnownPredicateAt(NoOverflowPred, Start, Last, Context))
10064 return None;
10065
10066 // Everything is fine.
10067 return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS);
10068 }
10069
10070 bool ScalarEvolution::isKnownPredicateViaConstantRanges(
10071 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
10072 if (HasSameValue(LHS, RHS))
10073 return ICmpInst::isTrueWhenEqual(Pred);
10074
10075 // This code is split out from isKnownPredicate because it is called from
10076 // within isLoopEntryGuardedByCond.
10077
10078 auto CheckRanges = [&](const ConstantRange &RangeLHS,
10079 const ConstantRange &RangeRHS) {
10080 return RangeLHS.icmp(Pred, RangeRHS);
10081 };
10082
10083 // The check at the top of the function catches the case where the values are
10084 // known to be equal.
10085 if (Pred == CmpInst::ICMP_EQ)
10086 return false;
10087
10088 if (Pred == CmpInst::ICMP_NE) {
10089 if (CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
10090 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)))
10091 return true;
10092 auto *Diff = getMinusSCEV(LHS, RHS);
10093 return !isa<SCEVCouldNotCompute>(Diff) && isKnownNonZero(Diff);
10094 }
10095
10096 if (CmpInst::isSigned(Pred))
10097 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));
10098
10099 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
10100 }
10101
10102 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
10103 const SCEV *LHS,
10104 const SCEV *RHS) {
10105 // Match X to (A + C1)<ExpectedFlags> and Y to (A + C2)<ExpectedFlags>, where
10106 // C1 and C2 are constant integers. If either X or Y is not an add
10107 // expression, consider it as X + 0 or Y + 0 respectively. C1 and C2 are
10108 // returned via OutC1 and OutC2.
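// Illustrative match: X = (%a + 3)<nsw> and Y = %a yields A = %a, C1 = 3
// and C2 = 0 (Y is treated as %a + 0 with the expected flags assumed).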
10109 auto MatchBinaryAddToConst = [this](const SCEV *X, const SCEV *Y, 10110 APInt &OutC1, APInt &OutC2, 10111 SCEV::NoWrapFlags ExpectedFlags) { 10112 const SCEV *XNonConstOp, *XConstOp; 10113 const SCEV *YNonConstOp, *YConstOp; 10114 SCEV::NoWrapFlags XFlagsPresent; 10115 SCEV::NoWrapFlags YFlagsPresent; 10116 10117 if (!splitBinaryAdd(X, XConstOp, XNonConstOp, XFlagsPresent)) { 10118 XConstOp = getZero(X->getType()); 10119 XNonConstOp = X; 10120 XFlagsPresent = ExpectedFlags; 10121 } 10122 if (!isa<SCEVConstant>(XConstOp) || 10123 (XFlagsPresent & ExpectedFlags) != ExpectedFlags) 10124 return false; 10125 10126 if (!splitBinaryAdd(Y, YConstOp, YNonConstOp, YFlagsPresent)) { 10127 YConstOp = getZero(Y->getType()); 10128 YNonConstOp = Y; 10129 YFlagsPresent = ExpectedFlags; 10130 } 10131 10132 if (!isa<SCEVConstant>(YConstOp) || 10133 (YFlagsPresent & ExpectedFlags) != ExpectedFlags) 10134 return false; 10135 10136 if (YNonConstOp != XNonConstOp) 10137 return false; 10138 10139 OutC1 = cast<SCEVConstant>(XConstOp)->getAPInt(); 10140 OutC2 = cast<SCEVConstant>(YConstOp)->getAPInt(); 10141 10142 return true; 10143 }; 10144 10145 APInt C1; 10146 APInt C2; 10147 10148 switch (Pred) { 10149 default: 10150 break; 10151 10152 case ICmpInst::ICMP_SGE: 10153 std::swap(LHS, RHS); 10154 LLVM_FALLTHROUGH; 10155 case ICmpInst::ICMP_SLE: 10156 // (X + C1)<nsw> s<= (X + C2)<nsw> if C1 s<= C2. 10157 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.sle(C2)) 10158 return true; 10159 10160 break; 10161 10162 case ICmpInst::ICMP_SGT: 10163 std::swap(LHS, RHS); 10164 LLVM_FALLTHROUGH; 10165 case ICmpInst::ICMP_SLT: 10166 // (X + C1)<nsw> s< (X + C2)<nsw> if C1 s< C2. 10167 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.slt(C2)) 10168 return true; 10169 10170 break; 10171 10172 case ICmpInst::ICMP_UGE: 10173 std::swap(LHS, RHS); 10174 LLVM_FALLTHROUGH; 10175 case ICmpInst::ICMP_ULE: 10176 // (X + C1)<nuw> u<= (X + C2)<nuw> for C1 u<= C2. 10177 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ule(C2)) 10178 return true; 10179 10180 break; 10181 10182 case ICmpInst::ICMP_UGT: 10183 std::swap(LHS, RHS); 10184 LLVM_FALLTHROUGH; 10185 case ICmpInst::ICMP_ULT: 10186 // (X + C1)<nuw> u< (X + C2)<nuw> if C1 u< C2. 10187 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ult(C2)) 10188 return true; 10189 break; 10190 } 10191 10192 return false; 10193 } 10194 10195 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, 10196 const SCEV *LHS, 10197 const SCEV *RHS) { 10198 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) 10199 return false; 10200 10201 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on 10202 // the stack can result in exponential time complexity. 10203 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true); 10204 10205 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L 10206 // 10207 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use 10208 // isKnownPredicate. isKnownPredicate is more powerful, but also more 10209 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the 10210 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to 10211 // use isKnownPredicate later if needed. 
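// Worked instance (illustrative): to prove "I u< L" given L s>= 0, it
// suffices to show I s>= 0 and I s< L, because a non-negative L makes the
// signed and unsigned orderings agree on [0, L).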
10212 return isKnownNonNegative(RHS) &&
10213 isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
10214 isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
10215 }
10216
10217 bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
10218 ICmpInst::Predicate Pred,
10219 const SCEV *LHS, const SCEV *RHS) {
10220 // No need to even try if we know the module has no guards.
10221 if (!HasGuards)
10222 return false;
10223
10224 return any_of(*BB, [&](const Instruction &I) {
10225 using namespace llvm::PatternMatch;
10226
10227 Value *Condition;
10228 return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
10229 m_Value(Condition))) &&
10230 isImpliedCond(Pred, LHS, RHS, Condition, false);
10231 });
10232 }
10233
10234 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
10235 /// protected by a conditional between LHS and RHS. This is used to
10236 /// eliminate casts.
10237 bool
10238 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
10239 ICmpInst::Predicate Pred,
10240 const SCEV *LHS, const SCEV *RHS) {
10241 // Interpret a null as meaning no loop, where there is obviously no guard
10242 // (interprocedural conditions notwithstanding).
10243 if (!L) return true;
10244
10245 if (VerifyIR)
10246 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
10247 "This cannot be done on broken IR!");
10248
10249
10250 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
10251 return true;
10252
10253 BasicBlock *Latch = L->getLoopLatch();
10254 if (!Latch)
10255 return false;
10256
10257 BranchInst *LoopContinuePredicate =
10258 dyn_cast<BranchInst>(Latch->getTerminator());
10259 if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
10260 isImpliedCond(Pred, LHS, RHS,
10261 LoopContinuePredicate->getCondition(),
10262 LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
10263 return true;
10264
10265 // We don't want more than one activation of the following loops on the stack
10266 // -- that can lead to O(n!) time complexity.
10267 if (WalkingBEDominatingConds)
10268 return false;
10269
10270 SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);
10271
10272 // See if we can exploit a trip count to prove the predicate.
10273 const auto &BETakenInfo = getBackedgeTakenInfo(L);
10274 const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
10275 if (LatchBECount != getCouldNotCompute()) {
10276 // We know that Latch branches back to the loop header exactly
10277 // LatchBECount times. This means the backedge condition at Latch is
10278 // equivalent to "{0,+,1} u< LatchBECount".
10279 Type *Ty = LatchBECount->getType();
10280 auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
10281 const SCEV *LoopCounter =
10282 getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
10283 if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
10284 LatchBECount))
10285 return true;
10286 }
10287
10288 // Check conditions due to any @llvm.assume intrinsics.
10289 for (auto &AssumeVH : AC.assumptions()) {
10290 if (!AssumeVH)
10291 continue;
10292 auto *CI = cast<CallInst>(AssumeVH);
10293 if (!DT.dominates(CI, Latch->getTerminator()))
10294 continue;
10295
10296 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
10297 return true;
10298 }
10299
10300 // If the loop is not reachable from the entry block, we risk running into an
10301 // infinite loop as we walk up into the dom tree.
These loops do not matter 10302 // anyway, so we just return a conservative answer when we see them. 10303 if (!DT.isReachableFromEntry(L->getHeader())) 10304 return false; 10305 10306 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 10307 return true; 10308 10309 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 10310 DTN != HeaderDTN; DTN = DTN->getIDom()) { 10311 assert(DTN && "should reach the loop header before reaching the root!"); 10312 10313 BasicBlock *BB = DTN->getBlock(); 10314 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 10315 return true; 10316 10317 BasicBlock *PBB = BB->getSinglePredecessor(); 10318 if (!PBB) 10319 continue; 10320 10321 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 10322 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 10323 continue; 10324 10325 Value *Condition = ContinuePredicate->getCondition(); 10326 10327 // If we have an edge `E` within the loop body that dominates the only 10328 // latch, the condition guarding `E` also guards the backedge. This 10329 // reasoning works only for loops with a single latch. 10330 10331 BasicBlockEdge DominatingEdge(PBB, BB); 10332 if (DominatingEdge.isSingleEdge()) { 10333 // We're constructively (and conservatively) enumerating edges within the 10334 // loop body that dominate the latch. The dominator tree better agree 10335 // with us on this: 10336 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 10337 10338 if (isImpliedCond(Pred, LHS, RHS, Condition, 10339 BB != ContinuePredicate->getSuccessor(0))) 10340 return true; 10341 } 10342 } 10343 10344 return false; 10345 } 10346 10347 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB, 10348 ICmpInst::Predicate Pred, 10349 const SCEV *LHS, 10350 const SCEV *RHS) { 10351 if (VerifyIR) 10352 assert(!verifyFunction(*BB->getParent(), &dbgs()) && 10353 "This cannot be done on broken IR!"); 10354 10355 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 10356 // the facts (a >= b && a != b) separately. A typical situation is when the 10357 // non-strict comparison is known from ranges and non-equality is known from 10358 // dominating predicates. If we are proving strict comparison, we always try 10359 // to prove non-equality and non-strict comparison separately. 10360 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 10361 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 10362 bool ProvedNonStrictComparison = false; 10363 bool ProvedNonEquality = false; 10364 10365 auto SplitAndProve = 10366 [&](std::function<bool(ICmpInst::Predicate)> Fn) -> bool { 10367 if (!ProvedNonStrictComparison) 10368 ProvedNonStrictComparison = Fn(NonStrictPredicate); 10369 if (!ProvedNonEquality) 10370 ProvedNonEquality = Fn(ICmpInst::ICMP_NE); 10371 if (ProvedNonStrictComparison && ProvedNonEquality) 10372 return true; 10373 return false; 10374 }; 10375 10376 if (ProvingStrictComparison) { 10377 auto ProofFn = [&](ICmpInst::Predicate P) { 10378 return isKnownViaNonRecursiveReasoning(P, LHS, RHS); 10379 }; 10380 if (SplitAndProve(ProofFn)) 10381 return true; 10382 } 10383 10384 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard. 
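// A guard is a call of the form (illustrative IR):
//   call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
// Execution only continues past it when %cond holds, so a dominating guard
// condition can be used much like a dominating branch condition.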
10385 auto ProveViaGuard = [&](const BasicBlock *Block) {
10386 if (isImpliedViaGuard(Block, Pred, LHS, RHS))
10387 return true;
10388 if (ProvingStrictComparison) {
10389 auto ProofFn = [&](ICmpInst::Predicate P) {
10390 return isImpliedViaGuard(Block, P, LHS, RHS);
10391 };
10392 if (SplitAndProve(ProofFn))
10393 return true;
10394 }
10395 return false;
10396 };
10397
10398 // Try to prove (Pred, LHS, RHS) using isImpliedCond.
10399 auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
10400 const Instruction *Context = &BB->front();
10401 if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, Context))
10402 return true;
10403 if (ProvingStrictComparison) {
10404 auto ProofFn = [&](ICmpInst::Predicate P) {
10405 return isImpliedCond(P, LHS, RHS, Condition, Inverse, Context);
10406 };
10407 if (SplitAndProve(ProofFn))
10408 return true;
10409 }
10410 return false;
10411 };
10412
10413 // Starting at the block's predecessor, climb up the predecessor chain as long
10414 // as each predecessor we find has a unique successor
10415 // leading to the original block.
10416 const Loop *ContainingLoop = LI.getLoopFor(BB);
10417 const BasicBlock *PredBB;
10418 if (ContainingLoop && ContainingLoop->getHeader() == BB)
10419 PredBB = ContainingLoop->getLoopPredecessor();
10420 else
10421 PredBB = BB->getSinglePredecessor();
10422 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
10423 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
10424 if (ProveViaGuard(Pair.first))
10425 return true;
10426
10427 const BranchInst *LoopEntryPredicate =
10428 dyn_cast<BranchInst>(Pair.first->getTerminator());
10429 if (!LoopEntryPredicate ||
10430 LoopEntryPredicate->isUnconditional())
10431 continue;
10432
10433 if (ProveViaCond(LoopEntryPredicate->getCondition(),
10434 LoopEntryPredicate->getSuccessor(0) != Pair.second))
10435 return true;
10436 }
10437
10438 // Check conditions due to any @llvm.assume intrinsics.
10439 for (auto &AssumeVH : AC.assumptions()) {
10440 if (!AssumeVH)
10441 continue;
10442 auto *CI = cast<CallInst>(AssumeVH);
10443 if (!DT.dominates(CI, BB))
10444 continue;
10445
10446 if (ProveViaCond(CI->getArgOperand(0), false))
10447 return true;
10448 }
10449
10450 return false;
10451 }
10452
10453 bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
10454 ICmpInst::Predicate Pred,
10455 const SCEV *LHS,
10456 const SCEV *RHS) {
10457 // Interpret a null as meaning no loop, where there is obviously no guard
10458 // (interprocedural conditions notwithstanding).
10459 if (!L)
10460 return false;
10461
10462 // Both LHS and RHS must be available at loop entry.
10463 assert(isAvailableAtLoopEntry(LHS, L) &&
10464 "LHS is not available at Loop Entry");
10465 assert(isAvailableAtLoopEntry(RHS, L) &&
10466 "RHS is not available at Loop Entry");
10467
10468 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
10469 return true;
10470
10471 return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
10472 }
10473
10474 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10475 const SCEV *RHS,
10476 const Value *FoundCondValue, bool Inverse,
10477 const Instruction *Context) {
10478 // A false condition implies anything. Do not bother analyzing it further.
10479 if (FoundCondValue ==
10480 ConstantInt::getBool(FoundCondValue->getContext(), Inverse))
10481 return true;
10482
10483 if (!PendingLoopPredicates.insert(FoundCondValue).second)
10484 return false;
10485
10486 auto ClearOnExit =
10487 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
10488
10489 // Recursively handle And and Or conditions.
10490 const Value *Op0, *Op1;
10491 if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
10492 if (!Inverse)
10493 return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, Context) ||
10494 isImpliedCond(Pred, LHS, RHS, Op1, Inverse, Context);
10495 } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) {
10496 if (Inverse)
10497 return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, Context) ||
10498 isImpliedCond(Pred, LHS, RHS, Op1, Inverse, Context);
10499 }
10500
10501 const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
10502 if (!ICI) return false;
10503
10504 // We have found a conditional branch that dominates the loop or controls
10505 // the loop latch. Check whether it is the comparison we are looking for.
10506 ICmpInst::Predicate FoundPred;
10507 if (Inverse)
10508 FoundPred = ICI->getInversePredicate();
10509 else
10510 FoundPred = ICI->getPredicate();
10511
10512 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
10513 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
10514
10515 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, Context);
10516 }
10517
10518 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10519 const SCEV *RHS,
10520 ICmpInst::Predicate FoundPred,
10521 const SCEV *FoundLHS, const SCEV *FoundRHS,
10522 const Instruction *Context) {
10523 // Balance the types.
10524 if (getTypeSizeInBits(LHS->getType()) <
10525 getTypeSizeInBits(FoundLHS->getType())) {
10526 // For unsigned and equality predicates, try to prove that both found
10527 // operands fit into a narrow unsigned range. If so, try to prove facts in
10528 // narrow types.
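// For illustration (assumed widths): suppose LHS/RHS are i32 while
// FoundLHS/FoundRHS are i64, and both found operands are known to be
// u<= 0xFFFFFFFF. Then `FoundLHS u< FoundRHS` holds in i64 exactly when
// `trunc(FoundLHS) u< trunc(FoundRHS)` holds in i32, so the implication can
// safely be attempted in the narrow type, as the code below does.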
10529 if (!CmpInst::isSigned(FoundPred)) { 10530 auto *NarrowType = LHS->getType(); 10531 auto *WideType = FoundLHS->getType(); 10532 auto BitWidth = getTypeSizeInBits(NarrowType); 10533 const SCEV *MaxValue = getZeroExtendExpr( 10534 getConstant(APInt::getMaxValue(BitWidth)), WideType); 10535 if (isKnownPredicate(ICmpInst::ICMP_ULE, FoundLHS, MaxValue) && 10536 isKnownPredicate(ICmpInst::ICMP_ULE, FoundRHS, MaxValue)) { 10537 const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType); 10538 const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType); 10539 if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS, 10540 TruncFoundRHS, Context)) 10541 return true; 10542 } 10543 } 10544 10545 if (CmpInst::isSigned(Pred)) { 10546 LHS = getSignExtendExpr(LHS, FoundLHS->getType()); 10547 RHS = getSignExtendExpr(RHS, FoundLHS->getType()); 10548 } else { 10549 LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); 10550 RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); 10551 } 10552 } else if (getTypeSizeInBits(LHS->getType()) > 10553 getTypeSizeInBits(FoundLHS->getType())) { 10554 if (CmpInst::isSigned(FoundPred)) { 10555 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 10556 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 10557 } else { 10558 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 10559 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 10560 } 10561 } 10562 return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS, 10563 FoundRHS, Context); 10564 } 10565 10566 bool ScalarEvolution::isImpliedCondBalancedTypes( 10567 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 10568 ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS, 10569 const Instruction *Context) { 10570 assert(getTypeSizeInBits(LHS->getType()) == 10571 getTypeSizeInBits(FoundLHS->getType()) && 10572 "Types should be balanced!"); 10573 // Canonicalize the query to match the way instcombine will have 10574 // canonicalized the comparison. 10575 if (SimplifyICmpOperands(Pred, LHS, RHS)) 10576 if (LHS == RHS) 10577 return CmpInst::isTrueWhenEqual(Pred); 10578 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 10579 if (FoundLHS == FoundRHS) 10580 return CmpInst::isFalseWhenEqual(FoundPred); 10581 10582 // Check to see if we can make the LHS or RHS match. 10583 if (LHS == FoundRHS || RHS == FoundLHS) { 10584 if (isa<SCEVConstant>(RHS)) { 10585 std::swap(FoundLHS, FoundRHS); 10586 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 10587 } else { 10588 std::swap(LHS, RHS); 10589 Pred = ICmpInst::getSwappedPredicate(Pred); 10590 } 10591 } 10592 10593 // Check whether the found predicate is the same as the desired predicate. 10594 if (FoundPred == Pred) 10595 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context); 10596 10597 // Check whether swapping the found predicate makes it the same as the 10598 // desired predicate. 10599 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 10600 // We can write the implication 10601 // 0. LHS Pred RHS <- FoundLHS SwapPred FoundRHS 10602 // using one of the following ways: 10603 // 1. LHS Pred RHS <- FoundRHS Pred FoundLHS 10604 // 2. RHS SwapPred LHS <- FoundLHS SwapPred FoundRHS 10605 // 3. LHS Pred RHS <- ~FoundLHS Pred ~FoundRHS 10606 // 4. ~LHS SwapPred ~RHS <- FoundLHS SwapPred FoundRHS 10607 // Forms 1. and 2. require swapping the operands of one condition. Don't 10608 // do this if it would break canonical constant/addrec ordering. 
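// For illustration (hypothetical operands): suppose the goal is `a s< b`
// (Pred == SLT) and the found condition is `y s> x` (FoundPred == SGT, whose
// swapped predicate equals Pred). Form 2. re-reads the goal as `b s> a` and
// asks whether `y s> x` implies it; form 1. instead re-reads the found fact
// as `x s< y` and asks whether it implies `a s< b`. The two differ only in
// operand order, which matters below because of the canonical placement of
// constants and addrecs.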
10609 if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS)) 10610 return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS, 10611 Context); 10612 if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS)) 10613 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, Context); 10614 10615 // Don't try to getNotSCEV pointers. 10616 if (LHS->getType()->isPointerTy() || FoundLHS->getType()->isPointerTy()) 10617 return false; 10618 10619 // There's no clear preference between forms 3. and 4., try both. 10620 return isImpliedCondOperands(FoundPred, getNotSCEV(LHS), getNotSCEV(RHS), 10621 FoundLHS, FoundRHS, Context) || 10622 isImpliedCondOperands(Pred, LHS, RHS, getNotSCEV(FoundLHS), 10623 getNotSCEV(FoundRHS), Context); 10624 } 10625 10626 // Unsigned comparison is the same as signed comparison when both the operands 10627 // are non-negative. 10628 if (CmpInst::isUnsigned(FoundPred) && 10629 CmpInst::getSignedPredicate(FoundPred) == Pred && 10630 isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) 10631 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context); 10632 10633 // Check if we can make progress by sharpening ranges. 10634 if (FoundPred == ICmpInst::ICMP_NE && 10635 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) { 10636 10637 const SCEVConstant *C = nullptr; 10638 const SCEV *V = nullptr; 10639 10640 if (isa<SCEVConstant>(FoundLHS)) { 10641 C = cast<SCEVConstant>(FoundLHS); 10642 V = FoundRHS; 10643 } else { 10644 C = cast<SCEVConstant>(FoundRHS); 10645 V = FoundLHS; 10646 } 10647 10648 // The guarding predicate tells us that C != V. If the known range 10649 // of V is [C, t), we can sharpen the range to [C + 1, t). The 10650 // range we consider has to correspond to same signedness as the 10651 // predicate we're interested in folding. 10652 10653 APInt Min = ICmpInst::isSigned(Pred) ? 10654 getSignedRangeMin(V) : getUnsignedRangeMin(V); 10655 10656 if (Min == C->getAPInt()) { 10657 // Given (V >= Min && V != Min) we conclude V >= (Min + 1). 10658 // This is true even if (Min + 1) wraps around -- in case of 10659 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). 10660 10661 APInt SharperMin = Min + 1; 10662 10663 switch (Pred) { 10664 case ICmpInst::ICMP_SGE: 10665 case ICmpInst::ICMP_UGE: 10666 // We know V `Pred` SharperMin. If this implies LHS `Pred` 10667 // RHS, we're done. 10668 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin), 10669 Context)) 10670 return true; 10671 LLVM_FALLTHROUGH; 10672 10673 case ICmpInst::ICMP_SGT: 10674 case ICmpInst::ICMP_UGT: 10675 // We know from the range information that (V `Pred` Min || 10676 // V == Min). We know from the guarding condition that !(V 10677 // == Min). This gives us 10678 // 10679 // V `Pred` Min || V == Min && !(V == Min) 10680 // => V `Pred` Min 10681 // 10682 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 10683 10684 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), 10685 Context)) 10686 return true; 10687 break; 10688 10689 // `LHS < RHS` and `LHS <= RHS` are handled in the same way as `RHS > LHS` and `RHS >= LHS` respectively. 
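// For instance (illustrative): with Pred == ULE and Min == C, the guard
// gives V != Min, hence V u>= SharperMin; the calls below then try to prove
// the goal on swapped operands, i.e. RHS u>= LHS, from that sharpened fact.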
10690 case ICmpInst::ICMP_SLE: 10691 case ICmpInst::ICMP_ULE: 10692 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 10693 LHS, V, getConstant(SharperMin), Context)) 10694 return true; 10695 LLVM_FALLTHROUGH; 10696 10697 case ICmpInst::ICMP_SLT: 10698 case ICmpInst::ICMP_ULT: 10699 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 10700 LHS, V, getConstant(Min), Context)) 10701 return true; 10702 break; 10703 10704 default: 10705 // No change 10706 break; 10707 } 10708 } 10709 } 10710 10711 // Check whether the actual condition is beyond sufficient. 10712 if (FoundPred == ICmpInst::ICMP_EQ) 10713 if (ICmpInst::isTrueWhenEqual(Pred)) 10714 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context)) 10715 return true; 10716 if (Pred == ICmpInst::ICMP_NE) 10717 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 10718 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, 10719 Context)) 10720 return true; 10721 10722 // Otherwise assume the worst. 10723 return false; 10724 } 10725 10726 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 10727 const SCEV *&L, const SCEV *&R, 10728 SCEV::NoWrapFlags &Flags) { 10729 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 10730 if (!AE || AE->getNumOperands() != 2) 10731 return false; 10732 10733 L = AE->getOperand(0); 10734 R = AE->getOperand(1); 10735 Flags = AE->getNoWrapFlags(); 10736 return true; 10737 } 10738 10739 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, 10740 const SCEV *Less) { 10741 // We avoid subtracting expressions here because this function is usually 10742 // fairly deep in the call stack (i.e. is called many times). 10743 10744 // X - X = 0. 10745 if (More == Less) 10746 return APInt(getTypeSizeInBits(More->getType()), 0); 10747 10748 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 10749 const auto *LAR = cast<SCEVAddRecExpr>(Less); 10750 const auto *MAR = cast<SCEVAddRecExpr>(More); 10751 10752 if (LAR->getLoop() != MAR->getLoop()) 10753 return None; 10754 10755 // We look at affine expressions only; not for correctness but to keep 10756 // getStepRecurrence cheap. 10757 if (!LAR->isAffine() || !MAR->isAffine()) 10758 return None; 10759 10760 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 10761 return None; 10762 10763 Less = LAR->getStart(); 10764 More = MAR->getStart(); 10765 10766 // fall through 10767 } 10768 10769 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 10770 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 10771 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 10772 return M - L; 10773 } 10774 10775 SCEV::NoWrapFlags Flags; 10776 const SCEV *LLess = nullptr, *RLess = nullptr; 10777 const SCEV *LMore = nullptr, *RMore = nullptr; 10778 const SCEVConstant *C1 = nullptr, *C2 = nullptr; 10779 // Compare (X + C1) vs X. 10780 if (splitBinaryAdd(Less, LLess, RLess, Flags)) 10781 if ((C1 = dyn_cast<SCEVConstant>(LLess))) 10782 if (RLess == More) 10783 return -(C1->getAPInt()); 10784 10785 // Compare X vs (X + C2). 10786 if (splitBinaryAdd(More, LMore, RMore, Flags)) 10787 if ((C2 = dyn_cast<SCEVConstant>(LMore))) 10788 if (RMore == Less) 10789 return C2->getAPInt(); 10790 10791 // Compare (X + C1) vs (X + C2). 
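// For example (illustrative SCEVs): with More = (%x + 10) and Less = (%x + 3),
// the two splits above leave C1 = 3 and C2 = 10 with RLess == RMore == %x,
// so the computed difference below is 10 - 3 = 7.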
10792 if (C1 && C2 && RLess == RMore) 10793 return C2->getAPInt() - C1->getAPInt(); 10794 10795 return None; 10796 } 10797 10798 bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart( 10799 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 10800 const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *Context) { 10801 // Try to recognize the following pattern: 10802 // 10803 // FoundRHS = ... 10804 // ... 10805 // loop: 10806 // FoundLHS = {Start,+,W} 10807 // context_bb: // Basic block from the same loop 10808 // known(Pred, FoundLHS, FoundRHS) 10809 // 10810 // If some predicate is known in the context of a loop, it is also known on 10811 // each iteration of this loop, including the first iteration. Therefore, in 10812 // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to 10813 // prove the original pred using this fact. 10814 if (!Context) 10815 return false; 10816 const BasicBlock *ContextBB = Context->getParent(); 10817 // Make sure AR varies in the context block. 10818 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) { 10819 const Loop *L = AR->getLoop(); 10820 // Make sure that context belongs to the loop and executes on 1st iteration 10821 // (if it ever executes at all). 10822 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 10823 return false; 10824 if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop())) 10825 return false; 10826 return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS); 10827 } 10828 10829 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) { 10830 const Loop *L = AR->getLoop(); 10831 // Make sure that context belongs to the loop and executes on 1st iteration 10832 // (if it ever executes at all). 10833 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 10834 return false; 10835 if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop())) 10836 return false; 10837 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart()); 10838 } 10839 10840 return false; 10841 } 10842 10843 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 10844 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 10845 const SCEV *FoundLHS, const SCEV *FoundRHS) { 10846 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 10847 return false; 10848 10849 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 10850 if (!AddRecLHS) 10851 return false; 10852 10853 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 10854 if (!AddRecFoundLHS) 10855 return false; 10856 10857 // We'd like to let SCEV reason about control dependencies, so we constrain 10858 // both the inequalities to be about add recurrences on the same loop. This 10859 // way we can use isLoopEntryGuardedByCond later. 10860 10861 const Loop *L = AddRecFoundLHS->getLoop(); 10862 if (L != AddRecLHS->getLoop()) 10863 return false; 10864 10865 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 10866 // 10867 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 10868 // ... (2) 10869 // 10870 // Informal proof for (2), assuming (1) [*]: 10871 // 10872 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... 
(3)[**]
10873 //
10874 // Then
10875 //
10876 // FoundLHS s< FoundRHS s< INT_MIN - C
10877 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ]
10878 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
10879 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s<
10880 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ]
10881 // <=> FoundLHS + C s< FoundRHS + C
10882 //
10883 // [*]: (1) can be proved by ruling out overflow.
10884 //
10885 // [**]: This can be proved by analyzing all the four possibilities:
10886 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
10887 // (A s>= 0, B s>= 0).
10888 //
10889 // Note:
10890 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
10891 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS
10892 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS
10893 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is
10894 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
10895 // C)".
10896
10897 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
10898 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
10899 if (!LDiff || !RDiff || *LDiff != *RDiff)
10900 return false;
10901
10902 if (LDiff->isMinValue())
10903 return true;
10904
10905 APInt FoundRHSLimit;
10906
10907 if (Pred == CmpInst::ICMP_ULT) {
10908 FoundRHSLimit = -(*RDiff);
10909 } else {
10910 assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
10911 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
10912 }
10913
10914 // Try to prove (1) or (2), as needed.
10915 return isAvailableAtLoopEntry(FoundRHS, L) &&
10916 isLoopEntryGuardedByCond(L, Pred, FoundRHS,
10917 getConstant(FoundRHSLimit));
10918 }
10919
10920 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
10921 const SCEV *LHS, const SCEV *RHS,
10922 const SCEV *FoundLHS,
10923 const SCEV *FoundRHS, unsigned Depth) {
10924 const PHINode *LPhi = nullptr, *RPhi = nullptr;
10925
10926 auto ClearOnExit = make_scope_exit([&]() {
10927 if (LPhi) {
10928 bool Erased = PendingMerges.erase(LPhi);
10929 assert(Erased && "Failed to erase LPhi!");
10930 (void)Erased;
10931 }
10932 if (RPhi) {
10933 bool Erased = PendingMerges.erase(RPhi);
10934 assert(Erased && "Failed to erase RPhi!");
10935 (void)Erased;
10936 }
10937 });
10938
10939 // Find the respective Phis and check that they are not already pending.
10940 if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
10941 if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
10942 if (!PendingMerges.insert(Phi).second)
10943 return false;
10944 LPhi = Phi;
10945 }
10946 if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
10947 if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
10948 // If we detect a loop of Phi nodes being processed by this method, for
10949 // example:
10950 //
10951 // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
10952 // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
10953 //
10954 // we don't want to deal with a case that complex, so return the
10955 // conservative answer false.
10956 if (!PendingMerges.insert(Phi).second)
10957 return false;
10958 RPhi = Phi;
10959 }
10960
10961 // If neither LHS nor RHS is a Phi, nothing to do here.
10962 if (!LPhi && !RPhi)
10963 return false;
10964
10965 // If there is a SCEVUnknown Phi we are interested in, make it left.
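// For illustration (hypothetical query): if only RHS is a SCEVUnknown Phi,
// e.g. when proving `%x u> %phi`, the swap below canonicalizes the query to
// `%phi u< %x` so that the Phi always ends up as LPhi.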
10966 if (!LPhi) {
10967 std::swap(LHS, RHS);
10968 std::swap(FoundLHS, FoundRHS);
10969 std::swap(LPhi, RPhi);
10970 Pred = ICmpInst::getSwappedPredicate(Pred);
10971 }
10972
10973 assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
10974 const BasicBlock *LBB = LPhi->getParent();
10975 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
10976
10977 auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
10978 return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
10979 isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
10980 isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
10981 };
10982
10983 if (RPhi && RPhi->getParent() == LBB) {
10984 // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
10985 // If we compare two Phis from the same block, and for each incoming block
10986 // the predicate is true for the incoming values from that block, then the
10987 // predicate is also true for the Phis.
10988 for (const BasicBlock *IncBB : predecessors(LBB)) {
10989 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
10990 const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
10991 if (!ProvedEasily(L, R))
10992 return false;
10993 }
10994 } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
10995 // Case two: RHS is also a Phi from the same basic block, and it is an
10996 // AddRec. This means there is a loop which has both an AddRec and an Unknown
10997 // PHI; for it, we can compare the incoming values of the AddRec from above
10998 // the loop and from the latch with the respective incoming values of LPhi.
10999 // TODO: Generalize to handle loops with many inputs in a header.
11000 if (LPhi->getNumIncomingValues() != 2) return false;
11001
11002 auto *RLoop = RAR->getLoop();
11003 auto *Predecessor = RLoop->getLoopPredecessor();
11004 assert(Predecessor && "Loop with AddRec with no predecessor?");
11005 const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
11006 if (!ProvedEasily(L1, RAR->getStart()))
11007 return false;
11008 auto *Latch = RLoop->getLoopLatch();
11009 assert(Latch && "Loop with AddRec with no latch?");
11010 const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
11011 if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
11012 return false;
11013 } else {
11014 // In all other cases go over the inputs of LHS and compare each of them to
11015 // RHS; the predicate is true for (LHS, RHS) if it is true for all such pairs.
11016 // At this point RHS is either a non-Phi, or it is a Phi from some block
11017 // different from LBB.
11018 for (const BasicBlock *IncBB : predecessors(LBB)) {
11019 // Check that RHS is available in this block.
11020 if (!dominates(RHS, IncBB))
11021 return false;
11022 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
11023 // Make sure L does not refer to a value from a potentially previous
11024 // iteration of a loop.
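// E.g. (illustrative): when IncBB is the latch of the loop and the incoming
// value is computed in IncBB itself (a post-increment IV, say), that value
// does not properly dominate IncBB and represents the previous iteration's
// state, so we conservatively give up on it here.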
11025 if (!properlyDominates(L, IncBB))
11026 return false;
11027 if (!ProvedEasily(L, RHS))
11028 return false;
11029 }
11030 }
11031 return true;
11032 }
11033
11034 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
11035 const SCEV *LHS, const SCEV *RHS,
11036 const SCEV *FoundLHS,
11037 const SCEV *FoundRHS,
11038 const Instruction *Context) {
11039 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
11040 return true;
11041
11042 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
11043 return true;
11044
11045 if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
11046 Context))
11047 return true;
11048
11049 return isImpliedCondOperandsHelper(Pred, LHS, RHS,
11050 FoundLHS, FoundRHS);
11051 }
11052
11053 /// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
11054 template <typename MinMaxExprType>
11055 static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
11056 const SCEV *Candidate) {
11057 const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
11058 if (!MinMaxExpr)
11059 return false;
11060
11061 return is_contained(MinMaxExpr->operands(), Candidate);
11062 }
11063
11064 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
11065 ICmpInst::Predicate Pred,
11066 const SCEV *LHS, const SCEV *RHS) {
11067 // If both sides are affine addrecs for the same loop, with equal
11068 // steps, and we know the recurrences don't wrap, then we only
11069 // need to check the predicate on the starting values.
11070
11071 if (!ICmpInst::isRelational(Pred))
11072 return false;
11073
11074 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
11075 if (!LAR)
11076 return false;
11077 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
11078 if (!RAR)
11079 return false;
11080 if (LAR->getLoop() != RAR->getLoop())
11081 return false;
11082 if (!LAR->isAffine() || !RAR->isAffine())
11083 return false;
11084
11085 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
11086 return false;
11087
11088 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
11089 SCEV::FlagNSW : SCEV::FlagNUW;
11090 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
11091 return false;
11092
11093 return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
11094 }
11095
11096 /// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
11097 /// expression?
11098 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
11099 ICmpInst::Predicate Pred,
11100 const SCEV *LHS, const SCEV *RHS) {
11101 switch (Pred) {
11102 default:
11103 return false;
11104
11105 case ICmpInst::ICMP_SGE:
11106 std::swap(LHS, RHS);
11107 LLVM_FALLTHROUGH;
11108 case ICmpInst::ICMP_SLE:
11109 return
11110 // min(A, ...) <= A
11111 IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) ||
11112 // A <= max(A, ...)
11113 IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);
11114
11115 case ICmpInst::ICMP_UGE:
11116 std::swap(LHS, RHS);
11117 LLVM_FALLTHROUGH;
11118 case ICmpInst::ICMP_ULE:
11119 return
11120 // min(A, ...) <= A
11121 IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
11122 // A <= max(A, ...)
11123 IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
11124 }
11125
11126 llvm_unreachable("covered switch fell through?!");
11127 }
11128
11129 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
11130 const SCEV *LHS, const SCEV *RHS,
11131 const SCEV *FoundLHS,
11132 const SCEV *FoundRHS,
11133 unsigned Depth) {
11134 assert(getTypeSizeInBits(LHS->getType()) ==
11135 getTypeSizeInBits(RHS->getType()) &&
11136 "LHS and RHS have different sizes?");
11137 assert(getTypeSizeInBits(FoundLHS->getType()) ==
11138 getTypeSizeInBits(FoundRHS->getType()) &&
11139 "FoundLHS and FoundRHS have different sizes?");
11140 // We want to avoid hurting the compile time with analysis of too big trees.
11141 if (Depth > MaxSCEVOperationsImplicationDepth)
11142 return false;
11143
11144 // We only want to work with GT comparisons so far.
11145 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) {
11146 Pred = CmpInst::getSwappedPredicate(Pred);
11147 std::swap(LHS, RHS);
11148 std::swap(FoundLHS, FoundRHS);
11149 }
11150
11151 // For unsigned, try to reduce it to the corresponding signed comparison.
11152 if (Pred == ICmpInst::ICMP_UGT)
11153 // We can replace unsigned predicate with its signed counterpart if all
11154 // involved values are non-negative.
11155 // TODO: We could have better support for unsigned.
11156 if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) {
11157 // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing
11158 // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us
11159 // use this fact to prove that LHS and RHS are non-negative.
11160 const SCEV *MinusOne = getMinusOne(LHS->getType());
11161 if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
11162 FoundRHS) &&
11163 isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
11164 FoundRHS))
11165 Pred = ICmpInst::ICMP_SGT;
11166 }
11167
11168 if (Pred != ICmpInst::ICMP_SGT)
11169 return false;
11170
11171 auto GetOpFromSExt = [&](const SCEV *S) {
11172 if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
11173 return Ext->getOperand();
11174 // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
11175 // the constant in some cases.
11176 return S;
11177 };
11178
11179 // Acquire values from extensions.
11180 auto *OrigLHS = LHS;
11181 auto *OrigFoundLHS = FoundLHS;
11182 LHS = GetOpFromSExt(LHS);
11183 FoundLHS = GetOpFromSExt(FoundLHS);
11184
11185 // Whether the SGT predicate can be proved trivially or using the found context.
11186 auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
11187 return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
11188 isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
11189 FoundRHS, Depth + 1);
11190 };
11191
11192 if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
11193 // We want to avoid creation of any new non-constant SCEV. Since we are
11194 // going to compare the operands to RHS, we should be certain that we don't
11195 // need any size extensions for this. So let's decline all cases when the
11196 // sizes of types of LHS and RHS do not match.
11197 // TODO: Maybe try to get RHS from sext to catch more cases?
11198 if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
11199 return false;
11200
11201 // Should not overflow.
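// (An <nsw> flag is required here: without it, LL + LR could wrap past the
// signed maximum and the sum rule below -- LL >= 0 && LR > RHS => LHS > RHS
// -- would be unsound.)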
11202 if (!LHSAddExpr->hasNoSignedWrap())
11203 return false;
11204
11205 auto *LL = LHSAddExpr->getOperand(0);
11206 auto *LR = LHSAddExpr->getOperand(1);
11207 auto *MinusOne = getMinusOne(RHS->getType());
11208
11209 // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
11210 auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
11211 return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
11212 };
11213 // Try to prove the following rule:
11214 // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
11215 // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
11216 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
11217 return true;
11218 } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
11219 Value *LL, *LR;
11220 // FIXME: Once we have SDiv implemented, we can get rid of this matching.
11221
11222 using namespace llvm::PatternMatch;
11223
11224 if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
11225 // Rules for division.
11226 // We are going to perform some comparisons with Denominator and its
11227 // derivative expressions. In the general case, creating a SCEV for it may
11228 // lead to a complex analysis of the entire graph, and in particular it
11229 // can request trip count recalculation for the same loop. This would be
11230 // cached as SCEVCouldNotCompute to avoid infinite recursion. To avoid
11231 // this, we only want to create SCEVs that are constants in this section.
11232 // So we bail if Denominator is not a constant.
11233 if (!isa<ConstantInt>(LR))
11234 return false;
11235
11236 auto *Denominator = cast<SCEVConstant>(getSCEV(LR));
11237
11238 // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
11239 // then a SCEV for the numerator already exists and matches FoundLHS.
11240 auto *Numerator = getExistingSCEV(LL);
11241 if (!Numerator || Numerator->getType() != FoundLHS->getType())
11242 return false;
11243
11244 // Make sure that the numerator matches FoundLHS and the denominator
11245 // is positive.
11246 if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
11247 return false;
11248
11249 auto *DTy = Denominator->getType();
11250 auto *FRHSTy = FoundRHS->getType();
11251 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
11252 // One of the types is a pointer and the other one is not. We cannot extend
11253 // them properly to a wider type, so let us just reject this case.
11254 // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
11255 // to avoid this check.
11256 return false;
11257
11258 // Given that:
11259 // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
11260 auto *WTy = getWiderType(DTy, FRHSTy);
11261 auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
11262 auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);
11263
11264 // Try to prove the following rule:
11265 // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
11266 // For example, given that FoundLHS > 2, FoundLHS is at
11267 // least 3. If we divide it by Denominator < 4, we will have at least 1.
11268 auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
11269 if (isKnownNonPositive(RHS) &&
11270 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
11271 return true;
11272
11273 // Try to prove the following rule:
11274 // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
11275 // For example, given that FoundLHS > -3, FoundLHS is at least -2.
11276 // If we divide it by Denominator > 2, then:
11277 // 1. If FoundLHS is negative, then the result is 0.
11278 // 2. If FoundLHS is non-negative, then the result is non-negative.
11279 // Either way, the result is non-negative.
11280 auto *MinusOne = getMinusOne(WTy);
11281 auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
11282 if (isKnownNegative(RHS) &&
11283 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
11284 return true;
11285 }
11286 }
11287
11288 // If our expression contained SCEVUnknown Phis, and we split it down and now
11289 // need to prove something for them, try to prove the predicate for every
11290 // possible incoming value of those Phis.
11291 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
11292 return true;
11293
11294 return false;
11295 }
11296
11297 static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
11298 const SCEV *LHS, const SCEV *RHS) {
11299 // zext x u<= sext x, sext x s<= zext x
11300 switch (Pred) {
11301 case ICmpInst::ICMP_SGE:
11302 std::swap(LHS, RHS);
11303 LLVM_FALLTHROUGH;
11304 case ICmpInst::ICMP_SLE: {
11305 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
11306 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
11307 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS);
11308 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
11309 return true;
11310 break;
11311 }
11312 case ICmpInst::ICMP_UGE:
11313 std::swap(LHS, RHS);
11314 LLVM_FALLTHROUGH;
11315 case ICmpInst::ICMP_ULE: {
11316 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
11317 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
11318 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
11319 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
11320 return true;
11321 break;
11322 }
11323 default:
11324 break;
11325 }
11326 return false;
11327 }
11328
11329 bool
11330 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
11331 const SCEV *LHS, const SCEV *RHS) {
11332 return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
11333 isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
11334 IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
11335 IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
11336 isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
11337 }
11338
11339 bool
11340 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
11341 const SCEV *LHS, const SCEV *RHS,
11342 const SCEV *FoundLHS,
11343 const SCEV *FoundRHS) {
11344 switch (Pred) {
11345 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
11346 case ICmpInst::ICMP_EQ:
11347 case ICmpInst::ICMP_NE:
11348 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
11349 return true;
11350 break;
11351 case ICmpInst::ICMP_SLT:
11352 case ICmpInst::ICMP_SLE:
11353 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
11354 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
11355 return true;
11356 break;
11357 case ICmpInst::ICMP_SGT:
11358 case ICmpInst::ICMP_SGE:
11359 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
11360 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
11361 return true;
11362 break;
11363 case ICmpInst::ICMP_ULT:
11364 case ICmpInst::ICMP_ULE:
11365 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
11366
isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
11367 return true;
11368 break;
11369 case ICmpInst::ICMP_UGT:
11370 case ICmpInst::ICMP_UGE:
11371 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
11372 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
11373 return true;
11374 break;
11375 }
11376
11377 // Maybe it can be proved via operations?
11378 if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
11379 return true;
11380
11381 return false;
11382 }
11383
11384 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
11385 const SCEV *LHS,
11386 const SCEV *RHS,
11387 const SCEV *FoundLHS,
11388 const SCEV *FoundRHS) {
11389 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
11390 // The restriction on `FoundRHS` can be lifted easily -- it exists only to
11391 // reduce the compile time impact of this optimization.
11392 return false;
11393
11394 Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
11395 if (!Addend)
11396 return false;
11397
11398 const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
11399
11400 // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
11401 // antecedent "`FoundLHS` `Pred` `FoundRHS`".
11402 ConstantRange FoundLHSRange =
11403 ConstantRange::makeExactICmpRegion(Pred, ConstFoundRHS);
11404
11405 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
11406 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));
11407
11408 // We can also compute the range of values for `LHS` that satisfy the
11409 // consequent, "`LHS` `Pred` `RHS`":
11410 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
11411 // The antecedent implies the consequent if every value of `LHS` that
11412 // satisfies the antecedent also satisfies the consequent.
11413 return LHSRange.icmp(Pred, ConstRHS);
11414 }
11415
11416 bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
11417 bool IsSigned) {
11418 assert(isKnownPositive(Stride) && "Positive stride expected!");
11419
11420 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
11421 const SCEV *One = getOne(Stride->getType());
11422
11423 if (IsSigned) {
11424 APInt MaxRHS = getSignedRangeMax(RHS);
11425 APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
11426 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
11427
11428 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
11429 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
11430 }
11431
11432 APInt MaxRHS = getUnsignedRangeMax(RHS);
11433 APInt MaxValue = APInt::getMaxValue(BitWidth);
11434 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));
11435
11436 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
11437 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
11438 }
11439
11440 bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
11441 bool IsSigned) {
11442
11443 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
11444 const SCEV *One = getOne(Stride->getType());
11445
11446 if (IsSigned) {
11447 APInt MinRHS = getSignedRangeMin(RHS);
11448 APInt MinValue = APInt::getSignedMinValue(BitWidth);
11449 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
11450
11451 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
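// Worked example (illustrative, i8): MinValue == -128 and
// MaxStrideMinusOne == 3 give the threshold -125 below; any RHS whose signed
// minimum is less than -125 could step the IV past INT8_MIN, so we report
// that overflow is possible.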
11452 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 11453 } 11454 11455 APInt MinRHS = getUnsignedRangeMin(RHS); 11456 APInt MinValue = APInt::getMinValue(BitWidth); 11457 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 11458 11459 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 11460 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 11461 } 11462 11463 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, 11464 const SCEV *Step) { 11465 const SCEV *One = getOne(Step->getType()); 11466 Delta = getAddExpr(Delta, getMinusSCEV(Step, One)); 11467 return getUDivExpr(Delta, Step); 11468 } 11469 11470 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 11471 const SCEV *Stride, 11472 const SCEV *End, 11473 unsigned BitWidth, 11474 bool IsSigned) { 11475 11476 assert(!isKnownNonPositive(Stride) && 11477 "Stride is expected strictly positive!"); 11478 // Calculate the maximum backedge count based on the range of values 11479 // permitted by Start, End, and Stride. 11480 const SCEV *MaxBECount; 11481 APInt MinStart = 11482 IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start); 11483 11484 APInt StrideForMaxBECount = 11485 IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride); 11486 11487 // We already know that the stride is positive, so we paper over conservatism 11488 // in our range computation by forcing StrideForMaxBECount to be at least one. 11489 // In theory this is unnecessary, but we expect MaxBECount to be a 11490 // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV (there 11491 // is nothing to constant fold it to). 11492 APInt One(BitWidth, 1, IsSigned); 11493 StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount); 11494 11495 APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth) 11496 : APInt::getMaxValue(BitWidth); 11497 APInt Limit = MaxValue - (StrideForMaxBECount - 1); 11498 11499 // Although End can be a MAX expression we estimate MaxEnd considering only 11500 // the case End = RHS of the loop termination condition. This is safe because 11501 // in the other case (End - Start) is zero, leading to a zero maximum backedge 11502 // taken count. 11503 APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit) 11504 : APIntOps::umin(getUnsignedRangeMax(End), Limit); 11505 11506 MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */, 11507 getConstant(StrideForMaxBECount) /* Step */); 11508 11509 return MaxBECount; 11510 } 11511 11512 ScalarEvolution::ExitLimit 11513 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS, 11514 const Loop *L, bool IsSigned, 11515 bool ControlsExit, bool AllowPredicates) { 11516 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 11517 11518 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 11519 bool PredicatedIV = false; 11520 11521 if (!IV && AllowPredicates) { 11522 // Try to make this an AddRec using runtime tests, in the first X 11523 // iterations of this loop, where X is the SCEV expression found by the 11524 // algorithm below. 11525 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 11526 PredicatedIV = true; 11527 } 11528 11529 // Avoid weird loops 11530 if (!IV || IV->getLoop() != L || !IV->isAffine()) 11531 return getCouldNotCompute(); 11532 11533 auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW; 11534 bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType); 11535 ICmpInst::Predicate Cond = IsSigned ? 
ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
11536
11537 const SCEV *Stride = IV->getStepRecurrence(*this);
11538
11539 bool PositiveStride = isKnownPositive(Stride);
11540
11541 // Avoid negative or zero stride values.
11542 if (!PositiveStride) {
11543 // We can compute the correct backedge taken count for loops with unknown
11544 // strides if we can prove that the loop is not an infinite loop with side
11545 // effects. Here's the loop structure we are trying to handle -
11546 //
11547 // i = start
11548 // do {
11549 // A[i] = i;
11550 // i += s;
11551 // } while (i < end);
11552 //
11553 // The backedge taken count for such loops is evaluated as -
11554 // (max(end, start + stride) - start - 1) /u stride
11555 //
11556 // The additional preconditions that we need to check to prove correctness
11557 // of the above formula are as follows -
11558 //
11559 // a) IV is either nuw or nsw depending upon signedness (indicated by the
11560 // NoWrap flag).
11561 // b) the loop is single-exit with no side effects.
11562 //
11563 //
11564 // Precondition a) implies that if the stride is negative, this is a single
11565 // trip loop. The backedge taken count formula reduces to zero in this case.
11566 //
11567 // Precondition b) implies that the unknown stride cannot be zero otherwise
11568 // we have UB.
11569 //
11570 // The positive stride case is the same as isKnownPositive(Stride) returning
11571 // true (original behavior of the function).
11572 //
11573 // We want to make sure that the stride is truly unknown as there are edge
11574 // cases where ScalarEvolution propagates no wrap flags to the
11575 // post-increment/decrement IV even though the increment/decrement operation
11576 // itself is wrapping. The computed backedge taken count may be wrong in
11577 // such cases. This is prevented by checking that the stride is not known to
11578 // be either positive or non-positive. For example, no wrap flags are
11579 // propagated to the post-increment IV of this loop with a trip count of 2 -
11580 //
11581 // unsigned char i;
11582 // for(i=127; i<128; i+=129)
11583 // A[i] = i;
11584 //
11585 if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
11586 !loopIsFiniteByAssumption(L))
11587 return getCouldNotCompute();
11588 } else if (!Stride->isOne() && !NoWrap) {
11589 auto isUBOnWrap = [&]() {
11590 // Can we prove this loop *must* be UB if overflow of IV occurs?
11591 // Reasoning goes as follows:
11592 // * Suppose the IV did self wrap.
11593 // * If Stride evenly divides the iteration space, then once wrap
11594 // occurs, the loop must revisit the same values.
11595 // * We know that RHS is invariant, and that none of those values
11596 // caused this exit to be taken previously. Thus, this exit is
11597 // dynamically dead.
11598 // * If this is the sole exit, then a dead exit implies the loop
11599 // must be infinite if there are no abnormal exits.
11600 // * If the loop were infinite, then it must either not be mustprogress
11601 // or have side effects. Otherwise, it must be UB.
11602 // * It can't (by assumption) be UB, so we have contradicted our
11603 // premise and can conclude the IV did not in fact self-wrap.
11604 // From no-self-wrap, we then need to prove no-(un)signed-wrap. This
11605 // follows from the fact that every (un)signed-wrapped, but
11606 // not self-wrapped, value must be less than the last value before
11607 // (un)signed wrap. Since we know that last value did not exit the
11608 // loop, neither will any smaller one.
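// Illustrative instance of the argument (assumed i8 IV, constant Stride 4):
// 4 is a power of two, so it evenly divides the 2^8 iteration space; a
// self-wrapped IV would only revisit values already seen, none of which
// satisfied the exit test, so the exit would be dynamically dead thereafter.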
11609
11610 if (!isLoopInvariant(RHS, L))
11611 return false;
11612
11613 auto *StrideC = dyn_cast<SCEVConstant>(Stride);
11614 if (!StrideC || !StrideC->getAPInt().isPowerOf2())
11615 return false;
11616
11617 if (!ControlsExit || !loopHasNoAbnormalExits(L))
11618 return false;
11619
11620 return loopIsFiniteByAssumption(L);
11621 };
11622
11623 // Avoid proven overflow cases: this will ensure that the backedge taken
11624 // count will not generate any unsigned overflow. Relaxed no-overflow
11625 // conditions exploit NoWrapFlags, allowing us to optimize in the presence
11626 // of undefined behavior, as in C.
11627 if (canIVOverflowOnLT(RHS, Stride, IsSigned) && !isUBOnWrap())
11628 return getCouldNotCompute();
11629 }
11630
11631 const SCEV *Start = IV->getStart();
11632
11633 // Preserve pointer-typed Start/RHS to pass to isLoopEntryGuardedByCond.
11634 // Use integer-typed versions for actual computation.
11635 const SCEV *OrigStart = Start;
11636 const SCEV *OrigRHS = RHS;
11637 if (Start->getType()->isPointerTy()) {
11638 Start = getLosslessPtrToIntExpr(Start);
11639 if (isa<SCEVCouldNotCompute>(Start))
11640 return Start;
11641 }
11642 if (RHS->getType()->isPointerTy()) {
11643 RHS = getLosslessPtrToIntExpr(RHS);
11644 if (isa<SCEVCouldNotCompute>(RHS))
11645 return RHS;
11646 }
11647
11648 const SCEV *End = RHS;
11649 // When the RHS is not invariant, we do not know the end bound of the loop and
11650 // cannot calculate the ExactBECount needed by ExitLimit. However, we can
11651 // calculate the MaxBECount, given the start, stride and max value for the end
11652 // bound of the loop (RHS), and the fact that IV does not overflow (which is
11653 // checked above).
11654 if (!isLoopInvariant(RHS, L)) {
11655 const SCEV *MaxBECount = computeMaxBECountForLT(
11656 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
11657 return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
11658 false /*MaxOrZero*/, Predicates);
11659 }
11660 // If the backedge is taken at least once, then it will be taken
11661 // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start
11662 // is the LHS value of the less-than comparison the first time it is evaluated
11663 // and End is the RHS.
11664 const SCEV *BECountIfBackedgeTaken =
11665 computeBECount(getMinusSCEV(End, Start), Stride);
11666 // If the loop entry is guarded by the result of the backedge test of the
11667 // first loop iteration, then we know the backedge will be taken at least
11668 // once and so the backedge taken count is as above. If not then we use the
11669 // expression (max(End,Start)-Start)/Stride to describe the backedge count,
11670 // as if the backedge is taken at least once max(End,Start) is End and so the
11671 // result is as above, and if not max(End,Start) is Start so we get a backedge
11672 // count of zero.
11673 const SCEV *BECount;
11674 if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(OrigStart, Stride), OrigRHS))
11675 BECount = BECountIfBackedgeTaken;
11676 else {
11677 // If we know that RHS >= Start in the context of the loop, then we know
11678 // that max(RHS, Start) = RHS at this point.
11679 if (isLoopEntryGuardedByCond(
11680 L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, OrigRHS, OrigStart))
11681 End = RHS;
11682 else
11683 End = IsSigned ?
getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); 11684 BECount = computeBECount(getMinusSCEV(End, Start), Stride); 11685 } 11686 11687 const SCEV *MaxBECount; 11688 bool MaxOrZero = false; 11689 if (isa<SCEVConstant>(BECount)) 11690 MaxBECount = BECount; 11691 else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) { 11692 // If we know exactly how many times the backedge will be taken if it's 11693 // taken at least once, then the backedge count will either be that or 11694 // zero. 11695 MaxBECount = BECountIfBackedgeTaken; 11696 MaxOrZero = true; 11697 } else { 11698 MaxBECount = computeMaxBECountForLT( 11699 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 11700 } 11701 11702 if (isa<SCEVCouldNotCompute>(MaxBECount) && 11703 !isa<SCEVCouldNotCompute>(BECount)) 11704 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 11705 11706 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates); 11707 } 11708 11709 ScalarEvolution::ExitLimit 11710 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, 11711 const Loop *L, bool IsSigned, 11712 bool ControlsExit, bool AllowPredicates) { 11713 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 11714 // We handle only IV > Invariant 11715 if (!isLoopInvariant(RHS, L)) 11716 return getCouldNotCompute(); 11717 11718 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 11719 if (!IV && AllowPredicates) 11720 // Try to make this an AddRec using runtime tests, in the first X 11721 // iterations of this loop, where X is the SCEV expression found by the 11722 // algorithm below. 11723 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 11724 11725 // Avoid weird loops 11726 if (!IV || IV->getLoop() != L || !IV->isAffine()) 11727 return getCouldNotCompute(); 11728 11729 auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW; 11730 bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType); 11731 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; 11732 11733 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); 11734 11735 // Avoid negative or zero stride values 11736 if (!isKnownPositive(Stride)) 11737 return getCouldNotCompute(); 11738 11739 // Avoid proven overflow cases: this will ensure that the backedge taken count 11740 // will not generate any unsigned overflow. Relaxed no-overflow conditions 11741 // exploit NoWrapFlags, allowing to optimize in presence of undefined 11742 // behaviors like the case of C language. 11743 if (!Stride->isOne() && !NoWrap) 11744 if (canIVOverflowOnGT(RHS, Stride, IsSigned)) 11745 return getCouldNotCompute(); 11746 11747 const SCEV *Start = IV->getStart(); 11748 const SCEV *End = RHS; 11749 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) { 11750 // If we know that Start >= RHS in the context of loop, then we know that 11751 // min(RHS, Start) = RHS at this point. 11752 if (isLoopEntryGuardedByCond( 11753 L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS)) 11754 End = RHS; 11755 else 11756 End = IsSigned ? 
getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start); 11757 } 11758 11759 if (Start->getType()->isPointerTy()) { 11760 Start = getLosslessPtrToIntExpr(Start); 11761 if (isa<SCEVCouldNotCompute>(Start)) 11762 return Start; 11763 } 11764 if (End->getType()->isPointerTy()) { 11765 End = getLosslessPtrToIntExpr(End); 11766 if (isa<SCEVCouldNotCompute>(End)) 11767 return End; 11768 } 11769 11770 const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride); 11771 11772 APInt MaxStart = IsSigned ? getSignedRangeMax(Start) 11773 : getUnsignedRangeMax(Start); 11774 11775 APInt MinStride = IsSigned ? getSignedRangeMin(Stride) 11776 : getUnsignedRangeMin(Stride); 11777 11778 unsigned BitWidth = getTypeSizeInBits(LHS->getType()); 11779 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) 11780 : APInt::getMinValue(BitWidth) + (MinStride - 1); 11781 11782 // Although End can be a MIN expression we estimate MinEnd considering only 11783 // the case End = RHS. This is safe because in the other case (Start - End) 11784 // is zero, leading to a zero maximum backedge taken count. 11785 APInt MinEnd = 11786 IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit) 11787 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 11788 11789 const SCEV *MaxBECount = isa<SCEVConstant>(BECount) 11790 ? BECount 11791 : computeBECount(getConstant(MaxStart - MinEnd), 11792 getConstant(MinStride)); 11793 11794 if (isa<SCEVCouldNotCompute>(MaxBECount)) 11795 MaxBECount = BECount; 11796 11797 return ExitLimit(BECount, MaxBECount, false, Predicates); 11798 } 11799 11800 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 11801 ScalarEvolution &SE) const { 11802 if (Range.isFullSet()) // Infinite loop. 11803 return SE.getCouldNotCompute(); 11804 11805 // If the start is a non-zero constant, shift the range to simplify things. 11806 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 11807 if (!SC->getValue()->isZero()) { 11808 SmallVector<const SCEV *, 4> Operands(operands()); 11809 Operands[0] = SE.getZero(SC->getType()); 11810 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 11811 getNoWrapFlags(FlagNW)); 11812 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 11813 return ShiftedAddRec->getNumIterationsInRange( 11814 Range.subtract(SC->getAPInt()), SE); 11815 // This is strange and shouldn't happen. 11816 return SE.getCouldNotCompute(); 11817 } 11818 11819 // The only time we can solve this is when we have all constant indices. 11820 // Otherwise, we cannot determine the overflow conditions. 11821 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 11822 return SE.getCouldNotCompute(); 11823 11824 // Okay at this point we know that all elements of the chrec are constants and 11825 // that the start element is zero. 11826 11827 // First check to see if the range contains zero. If not, the first 11828 // iteration exits. 11829 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 11830 if (!Range.contains(APInt(BitWidth, 0))) 11831 return SE.getZero(getType()); 11832 11833 if (isAffine()) { 11834 // If this is an affine expression then we have this situation: 11835 // Solve {0,+,A} in Range === Ax in Range 11836 11837 // We know that zero is in the range. If A is positive then we know that 11838 // the upper value of the range must be the first possible exit value. 11839 // If A is negative then the lower of the range is the last possible loop 11840 // value. 
Also note that we already checked for a full range.
    APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
    APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value. If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap-around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute(); // Something strange happened.

    // Ensure that the previous value is in the range. This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
               ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  }

  if (isQuadratic()) {
    if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
      return SE.getConstant(S.getValue());
  }

  return SE.getCouldNotCompute();
}

const SCEVAddRecExpr *
SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
  assert(getNumOperands() > 1 && "AddRec with zero step?");
  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
  // but in this case we cannot guarantee that the value returned will be an
  // AddRec because SCEV does not have a fixed point where it stops
  // simplification: it is legal to return ({rec1} + {rec2}). For example, it
  // may happen if we reach the arithmetic depth limit while simplifying. So we
  // construct the returned value explicitly.
  SmallVector<const SCEV *, 3> Ops;
  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
  // (this + Step) is {A+B,+,B+C,+,...,+,N}.
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier). This guarantees us that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrence with zero step?");
  Ops.push_back(Last);
  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
                                               SCEV::FlagAnyWrap));
}

// Return true when S contains at least one undef value.
static inline bool containsUndefs(const SCEV *S) {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    return false;
  });
}

namespace {

// Collect the step recurrences of all AddRec subexpressions.
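// For instance (an illustrative example, not taken from the test suite),
// visiting
//
//   {%a,+,4}<%outer> + {0,+,(8 * %n)}<%inner>
//
// pushes the step recurrences 4 and (8 * %n) into Strides.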
struct SCEVCollectStrides {
  ScalarEvolution &SE;
  SmallVectorImpl<const SCEV *> &Strides;

  SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
      : SE(SE), Strides(S) {}

  bool follow(const SCEV *S) {
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      Strides.push_back(AR->getStepRecurrence(SE));
    return true;
  }

  bool isDone() const { return false; }
};

// Collect all SCEVUnknown, SCEVMulExpr, and SCEVSignExtendExpr expressions.
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: we found an AddRec, no need to walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
// parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExprs.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec = false;
          SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
          visitAll(Op, ContainsAddRecVisitor);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.empty())
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace

/// Find parametric terms in this SCEVAddRecExpr. We look for parameters in
/// two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
                                             SmallVectorImpl<const SCEV *> &Terms) {
  SmallVector<const SCEV *, 4> Strides;
  SCEVCollectStrides StrideCollector(*this, Strides);
  visitAll(Expr, StrideCollector);

  LLVM_DEBUG({
    dbgs() << "Strides:\n";
    for (const SCEV *S : Strides)
      dbgs() << *S << "\n";
  });

  for (const SCEV *S : Strides) {
    SCEVCollectTerms TermCollector(Terms);
    visitAll(S, TermCollector);
  }

  LLVM_DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  SCEVCollectAddRecMultiplies MulCollector(Terms, *this);
  visitAll(Expr, MulCollector);
}

static bool findArrayDimensionsRec(ScalarEvolution &SE,
                                   SmallVectorImpl<const SCEV *> &Terms,
                                   SmallVectorImpl<const SCEV *> &Sizes) {
  int Last = Terms.size() - 1;
  const SCEV *Step = Terms[Last];

  // End of recursion.
  if (Last == 0) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
      SmallVector<const SCEV *, 2> Qs;
      for (const SCEV *Op : M->operands())
        if (!isa<SCEVConstant>(Op))
          Qs.push_back(Op);

      Step = SE.getMulExpr(Qs);
    }

    Sizes.push_back(Step);
    return true;
  }

  for (const SCEV *&Term : Terms) {
    // Normalize the terms before the next call to findArrayDimensionsRec.
    const SCEV *Q, *R;
    SCEVDivision::divide(SE, Term, Step, &Q, &R);

    // Bail out when GCD does not evenly divide one of the terms.
    if (!R->isZero())
      return false;

    Term = Q;
  }

  // Remove all SCEVConstants.
  erase_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); });

  if (!Terms.empty())
    if (!findArrayDimensionsRec(SE, Terms, Sizes))
      return false;

  Sizes.push_back(Step);
  return true;
}

// Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter.
static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
  for (const SCEV *T : Terms)
    if (SCEVExprContains(T, [](const SCEV *S) { return isa<SCEVUnknown>(S); }))
      return true;

  return false;
}

// Return the number of product terms in S.
static inline int numberOfTerms(const SCEV *S) {
  if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
    return Expr->getNumOperands();
  return 1;
}

static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
  if (isa<SCEVConstant>(T))
    return nullptr;

  if (isa<SCEVUnknown>(T))
    return T;

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
    SmallVector<const SCEV *, 2> Factors;
    for (const SCEV *Op : M->operands())
      if (!isa<SCEVConstant>(Op))
        Factors.push_back(Op);

    return SE.getMulExpr(Factors);
  }

  return T;
}

/// Return the size of an element read or written by Inst.
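/// For instance (illustrative), for a 'store i32 %v, i32* %p' this returns
/// the SCEV for sizeof(i32) -- typically the constant 4 in the pointer-width
/// effective SCEV type. Instructions other than loads and stores yield
/// nullptr.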
const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
  Type *Ty;
  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
    Ty = Store->getValueOperand()->getType();
  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
    Ty = Load->getType();
  else
    return nullptr;

  Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
  return getSizeOfExpr(ETy, Ty);
}

void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
                                          SmallVectorImpl<const SCEV *> &Sizes,
                                          const SCEV *ElementSize) {
  if (Terms.empty() || !ElementSize)
    return;

  // Early return when Terms do not contain parameters: we do not delinearize
  // non-parametric SCEVs.
  if (!containsParameters(Terms))
    return;

  LLVM_DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  // Remove duplicates.
  array_pod_sort(Terms.begin(), Terms.end());
  Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());

  // Put larger terms first.
  llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) {
    return numberOfTerms(LHS) > numberOfTerms(RHS);
  });

  // Try to divide all terms by the element size. If a term is not divisible
  // by the element size, proceed with the original term.
  for (const SCEV *&Term : Terms) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
    if (!Q->isZero())
      Term = Q;
  }

  SmallVector<const SCEV *, 4> NewTerms;

  // Remove constant factors.
  for (const SCEV *T : Terms)
    if (const SCEV *NewT = removeConstantFactors(*this, T))
      NewTerms.push_back(NewT);

  LLVM_DEBUG({
    dbgs() << "Terms after sorting:\n";
    for (const SCEV *T : NewTerms)
      dbgs() << *T << "\n";
  });

  if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
    Sizes.clear();
    return;
  }

  // The last element to be pushed into Sizes is the size of an element.
  Sizes.push_back(ElementSize);

  LLVM_DEBUG({
    dbgs() << "Sizes:\n";
    for (const SCEV *S : Sizes)
      dbgs() << *S << "\n";
  });
}

void ScalarEvolution::computeAccessFunctions(
    const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
    SmallVectorImpl<const SCEV *> &Sizes) {
  // Early exit in case this SCEV is not an affine multivariate function.
  if (Sizes.empty())
    return;

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
    if (!AR->isAffine())
      return;

  const SCEV *Res = Expr;
  int Last = Sizes.size() - 1;
  for (int i = Last; i >= 0; i--) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);

    LLVM_DEBUG({
      dbgs() << "Res: " << *Res << "\n";
      dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
      dbgs() << "Res divided by Sizes[i]:\n";
      dbgs() << "Quotient: " << *Q << "\n";
      dbgs() << "Remainder: " << *R << "\n";
    });

    Res = Q;

    // Do not record the last subscript corresponding to the size of elements
    // in the array.
    if (i == Last) {

      // Bail out if the remainder is too complex.
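      // (An AddRec remainder would mean the component being folded into the
      // base offset is itself loop-variant, which the recovered subscripts
      // could not express.)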
      if (isa<SCEVAddRecExpr>(R)) {
        Subscripts.clear();
        Sizes.clear();
        return;
      }

      continue;
    }

    // Record the access function for the current subscript.
    Subscripts.push_back(R);
  }

  // Also push in last position the remainder of the last division: it will be
  // the access function of the innermost dimension.
  Subscripts.push_back(Res);

  std::reverse(Subscripts.begin(), Subscripts.end());

  LLVM_DEBUG({
    dbgs() << "Subscripts:\n";
    for (const SCEV *S : Subscripts)
      dbgs() << *S << "\n";
  });
}

/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
/// sizes of an array access. The remainder of the delinearization is the
/// offset start of the array. The SCEV->delinearize algorithm computes the
/// multiples of SCEV coefficients: that is a pattern matching of
/// subexpressions in the stride and base of a SCEV corresponding to the
/// computation of a GCD (greatest common divisor) of base and stride. When
/// SCEV->delinearize fails, it returns the SCEV unchanged.
///
/// For example: when analyzing the memory access A[i][j][k] in this loop nest
///
///  void foo(long n, long m, long o, double A[n][m][o]) {
///
///    for (long i = 0; i < n; i++)
///      for (long j = 0; j < m; j++)
///        for (long k = 0; k < o; k++)
///          A[i][j][k] = 1.0;
///  }
///
/// the delinearization input is the following AddRec SCEV:
///
///  AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
///
/// From this SCEV, we are able to say that the base offset of the access is %A
/// because it appears as an offset that does not divide any of the strides in
/// the loops:
///
///  CHECK: Base offset: %A
///
/// and then SCEV->delinearize determines the size of some of the dimensions of
/// the array as these are the multiples by which the strides increase:
///
///  CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
///
/// Note that the outermost dimension remains of UnknownSize because there are
/// no strides that would help identify the size of the outermost dimension:
/// when the array has been statically allocated, one could compute the size of
/// that dimension by dividing the overall size of the array by the size of the
/// known dimensions: %m * %o * 8.
///
/// Finally, delinearize provides the access functions for the array reference
/// that corresponds to A[i][j][k] of the above C testcase:
///
///  CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
///
/// The testcases check the output of a function pass, DelinearizationPass,
/// which walks through all loads and stores of a function, asking for the SCEV
/// of the memory access with respect to all enclosing loops, calling
/// SCEV->delinearize on that, and printing the results.
void ScalarEvolution::delinearize(const SCEV *Expr,
                                  SmallVectorImpl<const SCEV *> &Subscripts,
                                  SmallVectorImpl<const SCEV *> &Sizes,
                                  const SCEV *ElementSize) {
  // First step: collect parametric terms.
12324 SmallVector<const SCEV *, 4> Terms; 12325 collectParametricTerms(Expr, Terms); 12326 12327 if (Terms.empty()) 12328 return; 12329 12330 // Second step: find subscript sizes. 12331 findArrayDimensions(Terms, Sizes, ElementSize); 12332 12333 if (Sizes.empty()) 12334 return; 12335 12336 // Third step: compute the access functions for each subscript. 12337 computeAccessFunctions(Expr, Subscripts, Sizes); 12338 12339 if (Subscripts.empty()) 12340 return; 12341 12342 LLVM_DEBUG({ 12343 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 12344 dbgs() << "ArrayDecl[UnknownSize]"; 12345 for (const SCEV *S : Sizes) 12346 dbgs() << "[" << *S << "]"; 12347 12348 dbgs() << "\nArrayRef"; 12349 for (const SCEV *S : Subscripts) 12350 dbgs() << "[" << *S << "]"; 12351 dbgs() << "\n"; 12352 }); 12353 } 12354 12355 bool ScalarEvolution::getIndexExpressionsFromGEP( 12356 const GetElementPtrInst *GEP, SmallVectorImpl<const SCEV *> &Subscripts, 12357 SmallVectorImpl<int> &Sizes) { 12358 assert(Subscripts.empty() && Sizes.empty() && 12359 "Expected output lists to be empty on entry to this function."); 12360 assert(GEP && "getIndexExpressionsFromGEP called with a null GEP"); 12361 Type *Ty = GEP->getPointerOperandType(); 12362 bool DroppedFirstDim = false; 12363 for (unsigned i = 1; i < GEP->getNumOperands(); i++) { 12364 const SCEV *Expr = getSCEV(GEP->getOperand(i)); 12365 if (i == 1) { 12366 if (auto *PtrTy = dyn_cast<PointerType>(Ty)) { 12367 Ty = PtrTy->getElementType(); 12368 } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) { 12369 Ty = ArrayTy->getElementType(); 12370 } else { 12371 Subscripts.clear(); 12372 Sizes.clear(); 12373 return false; 12374 } 12375 if (auto *Const = dyn_cast<SCEVConstant>(Expr)) 12376 if (Const->getValue()->isZero()) { 12377 DroppedFirstDim = true; 12378 continue; 12379 } 12380 Subscripts.push_back(Expr); 12381 continue; 12382 } 12383 12384 auto *ArrayTy = dyn_cast<ArrayType>(Ty); 12385 if (!ArrayTy) { 12386 Subscripts.clear(); 12387 Sizes.clear(); 12388 return false; 12389 } 12390 12391 Subscripts.push_back(Expr); 12392 if (!(DroppedFirstDim && i == 2)) 12393 Sizes.push_back(ArrayTy->getNumElements()); 12394 12395 Ty = ArrayTy->getElementType(); 12396 } 12397 return !Subscripts.empty(); 12398 } 12399 12400 //===----------------------------------------------------------------------===// 12401 // SCEVCallbackVH Class Implementation 12402 //===----------------------------------------------------------------------===// 12403 12404 void ScalarEvolution::SCEVCallbackVH::deleted() { 12405 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 12406 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 12407 SE->ConstantEvolutionLoopExitValue.erase(PN); 12408 SE->eraseValueFromMap(getValPtr()); 12409 // this now dangles! 12410 } 12411 12412 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 12413 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 12414 12415 // Forget all the expressions associated with users of the old value, 12416 // so that future queries will recompute the expressions using the new 12417 // value. 12418 Value *Old = getValPtr(); 12419 SmallVector<User *, 16> Worklist(Old->users()); 12420 SmallPtrSet<User *, 8> Visited; 12421 while (!Worklist.empty()) { 12422 User *U = Worklist.pop_back_val(); 12423 // Deleting the Old value will cause this to dangle. Postpone 12424 // that until everything else is done. 
12425 if (U == Old) 12426 continue; 12427 if (!Visited.insert(U).second) 12428 continue; 12429 if (PHINode *PN = dyn_cast<PHINode>(U)) 12430 SE->ConstantEvolutionLoopExitValue.erase(PN); 12431 SE->eraseValueFromMap(U); 12432 llvm::append_range(Worklist, U->users()); 12433 } 12434 // Delete the Old value. 12435 if (PHINode *PN = dyn_cast<PHINode>(Old)) 12436 SE->ConstantEvolutionLoopExitValue.erase(PN); 12437 SE->eraseValueFromMap(Old); 12438 // this now dangles! 12439 } 12440 12441 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 12442 : CallbackVH(V), SE(se) {} 12443 12444 //===----------------------------------------------------------------------===// 12445 // ScalarEvolution Class Implementation 12446 //===----------------------------------------------------------------------===// 12447 12448 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 12449 AssumptionCache &AC, DominatorTree &DT, 12450 LoopInfo &LI) 12451 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 12452 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 12453 LoopDispositions(64), BlockDispositions(64) { 12454 // To use guards for proving predicates, we need to scan every instruction in 12455 // relevant basic blocks, and not just terminators. Doing this is a waste of 12456 // time if the IR does not actually contain any calls to 12457 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 12458 // 12459 // This pessimizes the case where a pass that preserves ScalarEvolution wants 12460 // to _add_ guards to the module when there weren't any before, and wants 12461 // ScalarEvolution to optimize based on those guards. For now we prefer to be 12462 // efficient in lieu of being smart in that rather obscure case. 
12463 12464 auto *GuardDecl = F.getParent()->getFunction( 12465 Intrinsic::getName(Intrinsic::experimental_guard)); 12466 HasGuards = GuardDecl && !GuardDecl->use_empty(); 12467 } 12468 12469 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 12470 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 12471 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 12472 ValueExprMap(std::move(Arg.ValueExprMap)), 12473 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 12474 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 12475 PendingMerges(std::move(Arg.PendingMerges)), 12476 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 12477 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 12478 PredicatedBackedgeTakenCounts( 12479 std::move(Arg.PredicatedBackedgeTakenCounts)), 12480 ConstantEvolutionLoopExitValue( 12481 std::move(Arg.ConstantEvolutionLoopExitValue)), 12482 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 12483 LoopDispositions(std::move(Arg.LoopDispositions)), 12484 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 12485 BlockDispositions(std::move(Arg.BlockDispositions)), 12486 UnsignedRanges(std::move(Arg.UnsignedRanges)), 12487 SignedRanges(std::move(Arg.SignedRanges)), 12488 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 12489 UniquePreds(std::move(Arg.UniquePreds)), 12490 SCEVAllocator(std::move(Arg.SCEVAllocator)), 12491 LoopUsers(std::move(Arg.LoopUsers)), 12492 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 12493 FirstUnknown(Arg.FirstUnknown) { 12494 Arg.FirstUnknown = nullptr; 12495 } 12496 12497 ScalarEvolution::~ScalarEvolution() { 12498 // Iterate through all the SCEVUnknown instances and call their 12499 // destructors, so that they release their references to their values. 
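  // The SCEVUnknowns themselves are allocated out of SCEVAllocator, so only
  // the destructors are run here; their memory is reclaimed together with
  // the allocator.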
12500 for (SCEVUnknown *U = FirstUnknown; U;) { 12501 SCEVUnknown *Tmp = U; 12502 U = U->Next; 12503 Tmp->~SCEVUnknown(); 12504 } 12505 FirstUnknown = nullptr; 12506 12507 ExprValueMap.clear(); 12508 ValueExprMap.clear(); 12509 HasRecMap.clear(); 12510 BackedgeTakenCounts.clear(); 12511 PredicatedBackedgeTakenCounts.clear(); 12512 12513 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 12514 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 12515 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 12516 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 12517 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 12518 } 12519 12520 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 12521 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 12522 } 12523 12524 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 12525 const Loop *L) { 12526 // Print all inner loops first 12527 for (Loop *I : *L) 12528 PrintLoopInfo(OS, SE, I); 12529 12530 OS << "Loop "; 12531 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12532 OS << ": "; 12533 12534 SmallVector<BasicBlock *, 8> ExitingBlocks; 12535 L->getExitingBlocks(ExitingBlocks); 12536 if (ExitingBlocks.size() != 1) 12537 OS << "<multiple exits> "; 12538 12539 if (SE->hasLoopInvariantBackedgeTakenCount(L)) 12540 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n"; 12541 else 12542 OS << "Unpredictable backedge-taken count.\n"; 12543 12544 if (ExitingBlocks.size() > 1) 12545 for (BasicBlock *ExitingBlock : ExitingBlocks) { 12546 OS << " exit count for " << ExitingBlock->getName() << ": " 12547 << *SE->getExitCount(L, ExitingBlock) << "\n"; 12548 } 12549 12550 OS << "Loop "; 12551 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12552 OS << ": "; 12553 12554 if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) { 12555 OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L); 12556 if (SE->isBackedgeTakenCountMaxOrZero(L)) 12557 OS << ", actual taken count either this or zero."; 12558 } else { 12559 OS << "Unpredictable max backedge-taken count. "; 12560 } 12561 12562 OS << "\n" 12563 "Loop "; 12564 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12565 OS << ": "; 12566 12567 SCEVUnionPredicate Pred; 12568 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 12569 if (!isa<SCEVCouldNotCompute>(PBT)) { 12570 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 12571 OS << " Predicates:\n"; 12572 Pred.print(OS, 4); 12573 } else { 12574 OS << "Unpredictable predicated backedge-taken count. 
"; 12575 } 12576 OS << "\n"; 12577 12578 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 12579 OS << "Loop "; 12580 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12581 OS << ": "; 12582 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 12583 } 12584 } 12585 12586 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 12587 switch (LD) { 12588 case ScalarEvolution::LoopVariant: 12589 return "Variant"; 12590 case ScalarEvolution::LoopInvariant: 12591 return "Invariant"; 12592 case ScalarEvolution::LoopComputable: 12593 return "Computable"; 12594 } 12595 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 12596 } 12597 12598 void ScalarEvolution::print(raw_ostream &OS) const { 12599 // ScalarEvolution's implementation of the print method is to print 12600 // out SCEV values of all instructions that are interesting. Doing 12601 // this potentially causes it to create new SCEV objects though, 12602 // which technically conflicts with the const qualifier. This isn't 12603 // observable from outside the class though, so casting away the 12604 // const isn't dangerous. 12605 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 12606 12607 if (ClassifyExpressions) { 12608 OS << "Classifying expressions for: "; 12609 F.printAsOperand(OS, /*PrintType=*/false); 12610 OS << "\n"; 12611 for (Instruction &I : instructions(F)) 12612 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 12613 OS << I << '\n'; 12614 OS << " --> "; 12615 const SCEV *SV = SE.getSCEV(&I); 12616 SV->print(OS); 12617 if (!isa<SCEVCouldNotCompute>(SV)) { 12618 OS << " U: "; 12619 SE.getUnsignedRange(SV).print(OS); 12620 OS << " S: "; 12621 SE.getSignedRange(SV).print(OS); 12622 } 12623 12624 const Loop *L = LI.getLoopFor(I.getParent()); 12625 12626 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 12627 if (AtUse != SV) { 12628 OS << " --> "; 12629 AtUse->print(OS); 12630 if (!isa<SCEVCouldNotCompute>(AtUse)) { 12631 OS << " U: "; 12632 SE.getUnsignedRange(AtUse).print(OS); 12633 OS << " S: "; 12634 SE.getSignedRange(AtUse).print(OS); 12635 } 12636 } 12637 12638 if (L) { 12639 OS << "\t\t" "Exits: "; 12640 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 12641 if (!SE.isLoopInvariant(ExitValue, L)) { 12642 OS << "<<Unknown>>"; 12643 } else { 12644 OS << *ExitValue; 12645 } 12646 12647 bool First = true; 12648 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 12649 if (First) { 12650 OS << "\t\t" "LoopDispositions: { "; 12651 First = false; 12652 } else { 12653 OS << ", "; 12654 } 12655 12656 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12657 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 12658 } 12659 12660 for (auto *InnerL : depth_first(L)) { 12661 if (InnerL == L) 12662 continue; 12663 if (First) { 12664 OS << "\t\t" "LoopDispositions: { "; 12665 First = false; 12666 } else { 12667 OS << ", "; 12668 } 12669 12670 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12671 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 12672 } 12673 12674 OS << " }"; 12675 } 12676 12677 OS << "\n"; 12678 } 12679 } 12680 12681 OS << "Determining loop execution counts for: "; 12682 F.printAsOperand(OS, /*PrintType=*/false); 12683 OS << "\n"; 12684 for (Loop *I : LI) 12685 PrintLoopInfo(OS, &SE, I); 12686 } 12687 12688 ScalarEvolution::LoopDisposition 12689 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 12690 auto &Values = LoopDispositions[S]; 12691 for (auto 
&V : Values) { 12692 if (V.getPointer() == L) 12693 return V.getInt(); 12694 } 12695 Values.emplace_back(L, LoopVariant); 12696 LoopDisposition D = computeLoopDisposition(S, L); 12697 auto &Values2 = LoopDispositions[S]; 12698 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 12699 if (V.getPointer() == L) { 12700 V.setInt(D); 12701 break; 12702 } 12703 } 12704 return D; 12705 } 12706 12707 ScalarEvolution::LoopDisposition 12708 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 12709 switch (S->getSCEVType()) { 12710 case scConstant: 12711 return LoopInvariant; 12712 case scPtrToInt: 12713 case scTruncate: 12714 case scZeroExtend: 12715 case scSignExtend: 12716 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 12717 case scAddRecExpr: { 12718 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 12719 12720 // If L is the addrec's loop, it's computable. 12721 if (AR->getLoop() == L) 12722 return LoopComputable; 12723 12724 // Add recurrences are never invariant in the function-body (null loop). 12725 if (!L) 12726 return LoopVariant; 12727 12728 // Everything that is not defined at loop entry is variant. 12729 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 12730 return LoopVariant; 12731 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 12732 " dominate the contained loop's header?"); 12733 12734 // This recurrence is invariant w.r.t. L if AR's loop contains L. 12735 if (AR->getLoop()->contains(L)) 12736 return LoopInvariant; 12737 12738 // This recurrence is variant w.r.t. L if any of its operands 12739 // are variant. 12740 for (auto *Op : AR->operands()) 12741 if (!isLoopInvariant(Op, L)) 12742 return LoopVariant; 12743 12744 // Otherwise it's loop-invariant. 12745 return LoopInvariant; 12746 } 12747 case scAddExpr: 12748 case scMulExpr: 12749 case scUMaxExpr: 12750 case scSMaxExpr: 12751 case scUMinExpr: 12752 case scSMinExpr: { 12753 bool HasVarying = false; 12754 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 12755 LoopDisposition D = getLoopDisposition(Op, L); 12756 if (D == LoopVariant) 12757 return LoopVariant; 12758 if (D == LoopComputable) 12759 HasVarying = true; 12760 } 12761 return HasVarying ? LoopComputable : LoopInvariant; 12762 } 12763 case scUDivExpr: { 12764 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 12765 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 12766 if (LD == LoopVariant) 12767 return LoopVariant; 12768 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 12769 if (RD == LoopVariant) 12770 return LoopVariant; 12771 return (LD == LoopInvariant && RD == LoopInvariant) ? 12772 LoopInvariant : LoopComputable; 12773 } 12774 case scUnknown: 12775 // All non-instruction values are loop invariant. All instructions are loop 12776 // invariant if they are not contained in the specified loop. 12777 // Instructions are never considered invariant in the function body 12778 // (null loop) because they are defined within the "loop". 12779 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 12780 return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; 12781 return LoopInvariant; 12782 case scCouldNotCompute: 12783 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 12784 } 12785 llvm_unreachable("Unknown SCEV kind!"); 12786 } 12787 12788 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 12789 return getLoopDisposition(S, L) == LoopInvariant; 12790 } 12791 12792 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 12793 return getLoopDisposition(S, L) == LoopComputable; 12794 } 12795 12796 ScalarEvolution::BlockDisposition 12797 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 12798 auto &Values = BlockDispositions[S]; 12799 for (auto &V : Values) { 12800 if (V.getPointer() == BB) 12801 return V.getInt(); 12802 } 12803 Values.emplace_back(BB, DoesNotDominateBlock); 12804 BlockDisposition D = computeBlockDisposition(S, BB); 12805 auto &Values2 = BlockDispositions[S]; 12806 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 12807 if (V.getPointer() == BB) { 12808 V.setInt(D); 12809 break; 12810 } 12811 } 12812 return D; 12813 } 12814 12815 ScalarEvolution::BlockDisposition 12816 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 12817 switch (S->getSCEVType()) { 12818 case scConstant: 12819 return ProperlyDominatesBlock; 12820 case scPtrToInt: 12821 case scTruncate: 12822 case scZeroExtend: 12823 case scSignExtend: 12824 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 12825 case scAddRecExpr: { 12826 // This uses a "dominates" query instead of "properly dominates" query 12827 // to test for proper dominance too, because the instruction which 12828 // produces the addrec's value is a PHI, and a PHI effectively properly 12829 // dominates its entire containing block. 12830 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 12831 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 12832 return DoesNotDominateBlock; 12833 12834 // Fall through into SCEVNAryExpr handling. 12835 LLVM_FALLTHROUGH; 12836 } 12837 case scAddExpr: 12838 case scMulExpr: 12839 case scUMaxExpr: 12840 case scSMaxExpr: 12841 case scUMinExpr: 12842 case scSMinExpr: { 12843 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 12844 bool Proper = true; 12845 for (const SCEV *NAryOp : NAry->operands()) { 12846 BlockDisposition D = getBlockDisposition(NAryOp, BB); 12847 if (D == DoesNotDominateBlock) 12848 return DoesNotDominateBlock; 12849 if (D == DominatesBlock) 12850 Proper = false; 12851 } 12852 return Proper ? ProperlyDominatesBlock : DominatesBlock; 12853 } 12854 case scUDivExpr: { 12855 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 12856 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 12857 BlockDisposition LD = getBlockDisposition(LHS, BB); 12858 if (LD == DoesNotDominateBlock) 12859 return DoesNotDominateBlock; 12860 BlockDisposition RD = getBlockDisposition(RHS, BB); 12861 if (RD == DoesNotDominateBlock) 12862 return DoesNotDominateBlock; 12863 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
      ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
          dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

void
ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  ExprValueMap.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (Entry.first == S)
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }

  auto RemoveSCEVFromBackedgeMap =
      [S](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        for (auto I = Map.begin(), E = Map.end(); I != E;) {
          BackedgeTakenInfo &BEInfo = I->second;
          if (BEInfo.hasOperand(S))
            Map.erase(I++);
          else
            ++I;
        }
      };

  RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
}

void
ScalarEvolution::getUsedLoops(const SCEV *S,
                              SmallPtrSetImpl<const Loop *> &LoopsUsed) {
  struct FindUsedLoops {
    FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
        : LoopsUsed(LoopsUsed) {}
    SmallPtrSetImpl<const Loop *> &LoopsUsed;
    bool follow(const SCEV *S) {
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
        LoopsUsed.insert(AR->getLoop());
      return true;
    }

    bool isDone() const { return false; }
  };

  FindUsedLoops F(LoopsUsed);
  SCEVTraversal<FindUsedLoops>(F).visitAll(S);
}

void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(S, LoopsUsed);
  for (auto *L : LoopsUsed)
    LoopUsers[L].push_back(S);
}

void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
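  // (It re-creates this analysis's cached expressions inside SE2, so they can
  // be compared against SE2's freshly computed trip counts without mixing the
  // two analyses' uniquing tables.)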
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    llvm::append_range(LoopStack, *L);

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whichever pass
      // changed the loop to make a trip count go from could not compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a loop
      // go from "undef" to "undef+1" (say). The transform is fine, since in
      // both cases the loop iterates "undef" times, but SCEV thinks we
      // increased the trip count of the loop by 1 incorrectly.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount);

    // Unless VerifySCEVStrict is set, we only compare constant deltas.
    if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) {
      dbgs() << "Trip Count for " << *L << " Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *Delta << "\n";
      std::abort();
    }
  }

  // Collect all valid loops currently in LoopInfo.
  SmallPtrSet<Loop *, 32> ValidLoops;
  SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end());
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    if (ValidLoops.contains(L))
      continue;
    ValidLoops.insert(L);
    Worklist.append(L->begin(), L->end());
  }
  // Check for SCEV expressions referencing invalid/deleted loops.
13039 for (auto &KV : ValueExprMap) { 13040 auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second); 13041 if (!AR) 13042 continue; 13043 assert(ValidLoops.contains(AR->getLoop()) && 13044 "AddRec references invalid loop"); 13045 } 13046 } 13047 13048 bool ScalarEvolution::invalidate( 13049 Function &F, const PreservedAnalyses &PA, 13050 FunctionAnalysisManager::Invalidator &Inv) { 13051 // Invalidate the ScalarEvolution object whenever it isn't preserved or one 13052 // of its dependencies is invalidated. 13053 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 13054 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 13055 Inv.invalidate<AssumptionAnalysis>(F, PA) || 13056 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 13057 Inv.invalidate<LoopAnalysis>(F, PA); 13058 } 13059 13060 AnalysisKey ScalarEvolutionAnalysis::Key; 13061 13062 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 13063 FunctionAnalysisManager &AM) { 13064 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 13065 AM.getResult<AssumptionAnalysis>(F), 13066 AM.getResult<DominatorTreeAnalysis>(F), 13067 AM.getResult<LoopAnalysis>(F)); 13068 } 13069 13070 PreservedAnalyses 13071 ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) { 13072 AM.getResult<ScalarEvolutionAnalysis>(F).verify(); 13073 return PreservedAnalyses::all(); 13074 } 13075 13076 PreservedAnalyses 13077 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 13078 // For compatibility with opt's -analyze feature under legacy pass manager 13079 // which was not ported to NPM. This keeps tests using 13080 // update_analyze_test_checks.py working. 13081 OS << "Printing analysis 'Scalar Evolution Analysis' for function '" 13082 << F.getName() << "':\n"; 13083 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 13084 return PreservedAnalyses::all(); 13085 } 13086 13087 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 13088 "Scalar Evolution Analysis", false, true) 13089 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 13090 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 13091 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 13092 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 13093 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 13094 "Scalar Evolution Analysis", false, true) 13095 13096 char ScalarEvolutionWrapperPass::ID = 0; 13097 13098 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 13099 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 13100 } 13101 13102 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 13103 SE.reset(new ScalarEvolution( 13104 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 13105 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 13106 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 13107 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 13108 return false; 13109 } 13110 13111 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 13112 13113 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 13114 SE->print(OS); 13115 } 13116 13117 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 13118 if (!VerifySCEV) 13119 return; 13120 13121 SE->verify(); 13122 } 13123 13124 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 13125 AU.setPreservesAll(); 13126 AU.addRequiredTransitive<AssumptionCacheTracker>(); 13127 
AU.addRequiredTransitive<LoopInfoWrapperPass>(); 13128 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 13129 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 13130 } 13131 13132 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, 13133 const SCEV *RHS) { 13134 FoldingSetNodeID ID; 13135 assert(LHS->getType() == RHS->getType() && 13136 "Type mismatch between LHS and RHS"); 13137 // Unique this node based on the arguments 13138 ID.AddInteger(SCEVPredicate::P_Equal); 13139 ID.AddPointer(LHS); 13140 ID.AddPointer(RHS); 13141 void *IP = nullptr; 13142 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 13143 return S; 13144 SCEVEqualPredicate *Eq = new (SCEVAllocator) 13145 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 13146 UniquePreds.InsertNode(Eq, IP); 13147 return Eq; 13148 } 13149 13150 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 13151 const SCEVAddRecExpr *AR, 13152 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 13153 FoldingSetNodeID ID; 13154 // Unique this node based on the arguments 13155 ID.AddInteger(SCEVPredicate::P_Wrap); 13156 ID.AddPointer(AR); 13157 ID.AddInteger(AddedFlags); 13158 void *IP = nullptr; 13159 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 13160 return S; 13161 auto *OF = new (SCEVAllocator) 13162 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 13163 UniquePreds.InsertNode(OF, IP); 13164 return OF; 13165 } 13166 13167 namespace { 13168 13169 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 13170 public: 13171 13172 /// Rewrites \p S in the context of a loop L and the SCEV predication 13173 /// infrastructure. 13174 /// 13175 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the 13176 /// equivalences present in \p Pred. 13177 /// 13178 /// If \p NewPreds is non-null, rewrite is free to add further predicates to 13179 /// \p NewPreds such that the result will be an AddRecExpr. 13180 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 13181 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 13182 SCEVUnionPredicate *Pred) { 13183 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); 13184 return Rewriter.visit(S); 13185 } 13186 13187 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 13188 if (Pred) { 13189 auto ExprPreds = Pred->getPredicatesForExpr(Expr); 13190 for (auto *Pred : ExprPreds) 13191 if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred)) 13192 if (IPred->getLHS() == Expr) 13193 return IPred->getRHS(); 13194 } 13195 return convertToAddRecWithPreds(Expr); 13196 } 13197 13198 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 13199 const SCEV *Operand = visit(Expr->getOperand()); 13200 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 13201 if (AR && AR->getLoop() == L && AR->isAffine()) { 13202 // This couldn't be folded because the operand didn't have the nuw 13203 // flag. Add the nusw flag as an assumption that we could make. 
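      // That is, once {X,+,S}<%L> is assumed not to wrap in the unsigned
      // sense (nusw), it is safe to rewrite
      //   zext({X,+,S}<%L>)  ==>  {zext(X),+,sext(S)}<%L>
      // which is exactly the AddRec constructed below.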
13204 const SCEV *Step = AR->getStepRecurrence(SE); 13205 Type *Ty = Expr->getType(); 13206 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 13207 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 13208 SE.getSignExtendExpr(Step, Ty), L, 13209 AR->getNoWrapFlags()); 13210 } 13211 return SE.getZeroExtendExpr(Operand, Expr->getType()); 13212 } 13213 13214 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 13215 const SCEV *Operand = visit(Expr->getOperand()); 13216 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 13217 if (AR && AR->getLoop() == L && AR->isAffine()) { 13218 // This couldn't be folded because the operand didn't have the nsw 13219 // flag. Add the nssw flag as an assumption that we could make. 13220 const SCEV *Step = AR->getStepRecurrence(SE); 13221 Type *Ty = Expr->getType(); 13222 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 13223 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 13224 SE.getSignExtendExpr(Step, Ty), L, 13225 AR->getNoWrapFlags()); 13226 } 13227 return SE.getSignExtendExpr(Operand, Expr->getType()); 13228 } 13229 13230 private: 13231 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 13232 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 13233 SCEVUnionPredicate *Pred) 13234 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} 13235 13236 bool addOverflowAssumption(const SCEVPredicate *P) { 13237 if (!NewPreds) { 13238 // Check if we've already made this assumption. 13239 return Pred && Pred->implies(P); 13240 } 13241 NewPreds->insert(P); 13242 return true; 13243 } 13244 13245 bool addOverflowAssumption(const SCEVAddRecExpr *AR, 13246 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 13247 auto *A = SE.getWrapPredicate(AR, AddedFlags); 13248 return addOverflowAssumption(A); 13249 } 13250 13251 // If \p Expr represents a PHINode, we try to see if it can be represented 13252 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible 13253 // to add this predicate as a runtime overflow check, we return the AddRec. 13254 // If \p Expr does not meet these conditions (is not a PHI node, or we 13255 // couldn't create an AddRec for it, or couldn't add the predicate), we just 13256 // return \p Expr. 13257 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) { 13258 if (!isa<PHINode>(Expr->getValue())) 13259 return Expr; 13260 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 13261 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr); 13262 if (!PredicatedRewrite) 13263 return Expr; 13264 for (auto *P : PredicatedRewrite->second){ 13265 // Wrap predicates from outer loops are not supported. 
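      // (The predicates collected here are assumed and checked for the
      // current loop L, so a wrap assumption on some other loop's AddRec
      // could not be enforced from here.)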
13266 if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) { 13267 auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr()); 13268 if (L != AR->getLoop()) 13269 return Expr; 13270 } 13271 if (!addOverflowAssumption(P)) 13272 return Expr; 13273 } 13274 return PredicatedRewrite->first; 13275 } 13276 13277 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds; 13278 SCEVUnionPredicate *Pred; 13279 const Loop *L; 13280 }; 13281 13282 } // end anonymous namespace 13283 13284 const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, 13285 SCEVUnionPredicate &Preds) { 13286 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds); 13287 } 13288 13289 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates( 13290 const SCEV *S, const Loop *L, 13291 SmallPtrSetImpl<const SCEVPredicate *> &Preds) { 13292 SmallPtrSet<const SCEVPredicate *, 4> TransformPreds; 13293 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr); 13294 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S); 13295 13296 if (!AddRec) 13297 return nullptr; 13298 13299 // Since the transformation was successful, we can now transfer the SCEV 13300 // predicates. 13301 for (auto *P : TransformPreds) 13302 Preds.insert(P); 13303 13304 return AddRec; 13305 } 13306 13307 /// SCEV predicates 13308 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID, 13309 SCEVPredicateKind Kind) 13310 : FastID(ID), Kind(Kind) {} 13311 13312 SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID, 13313 const SCEV *LHS, const SCEV *RHS) 13314 : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) { 13315 assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match"); 13316 assert(LHS != RHS && "LHS and RHS are the same SCEV"); 13317 } 13318 13319 bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const { 13320 const auto *Op = dyn_cast<SCEVEqualPredicate>(N); 13321 13322 if (!Op) 13323 return false; 13324 13325 return Op->LHS == LHS && Op->RHS == RHS; 13326 } 13327 13328 bool SCEVEqualPredicate::isAlwaysTrue() const { return false; } 13329 13330 const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; } 13331 13332 void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const { 13333 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n"; 13334 } 13335 13336 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID, 13337 const SCEVAddRecExpr *AR, 13338 IncrementWrapFlags Flags) 13339 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {} 13340 13341 const SCEV *SCEVWrapPredicate::getExpr() const { return AR; } 13342 13343 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const { 13344 const auto *Op = dyn_cast<SCEVWrapPredicate>(N); 13345 13346 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags; 13347 } 13348 13349 bool SCEVWrapPredicate::isAlwaysTrue() const { 13350 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags(); 13351 IncrementWrapFlags IFlags = Flags; 13352 13353 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags) 13354 IFlags = clearFlags(IFlags, IncrementNSSW); 13355 13356 return IFlags == IncrementAnyWrap; 13357 } 13358 13359 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const { 13360 OS.indent(Depth) << *getExpr() << " Added Flags: "; 13361 if (SCEVWrapPredicate::IncrementNUSW & getFlags()) 13362 OS << "<nusw>"; 13363 if (SCEVWrapPredicate::IncrementNSSW & getFlags()) 13364 OS << "<nssw>"; 13365 OS << "\n"; 13366 } 13367 13368 SCEVWrapPredicate::IncrementWrapFlags 13369 
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, 13370 ScalarEvolution &SE) { 13371 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; 13372 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); 13373 13374 // We can safely transfer the NSW flag as NSSW. 13375 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags) 13376 ImpliedFlags = IncrementNSSW; 13377 13378 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) { 13379 // If the increment is positive, the SCEV NUW flag will also imply the 13380 // WrapPredicate NUSW flag. 13381 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) 13382 if (Step->getValue()->getValue().isNonNegative()) 13383 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW); 13384 } 13385 13386 return ImpliedFlags; 13387 } 13388 13389 /// Union predicates don't get cached so create a dummy set ID for it. 13390 SCEVUnionPredicate::SCEVUnionPredicate() 13391 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {} 13392 13393 bool SCEVUnionPredicate::isAlwaysTrue() const { 13394 return all_of(Preds, 13395 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); 13396 } 13397 13398 ArrayRef<const SCEVPredicate *> 13399 SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) { 13400 auto I = SCEVToPreds.find(Expr); 13401 if (I == SCEVToPreds.end()) 13402 return ArrayRef<const SCEVPredicate *>(); 13403 return I->second; 13404 } 13405 13406 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { 13407 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) 13408 return all_of(Set->Preds, 13409 [this](const SCEVPredicate *I) { return this->implies(I); }); 13410 13411 auto ScevPredsIt = SCEVToPreds.find(N->getExpr()); 13412 if (ScevPredsIt == SCEVToPreds.end()) 13413 return false; 13414 auto &SCEVPreds = ScevPredsIt->second; 13415 13416 return any_of(SCEVPreds, 13417 [N](const SCEVPredicate *I) { return I->implies(N); }); 13418 } 13419 13420 const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; } 13421 13422 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { 13423 for (auto Pred : Preds) 13424 Pred->print(OS, Depth); 13425 } 13426 13427 void SCEVUnionPredicate::add(const SCEVPredicate *N) { 13428 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) { 13429 for (auto Pred : Set->Preds) 13430 add(Pred); 13431 return; 13432 } 13433 13434 if (implies(N)) 13435 return; 13436 13437 const SCEV *Key = N->getExpr(); 13438 assert(Key && "Only SCEVUnionPredicate doesn't have an " 13439 " associated expression!"); 13440 13441 SCEVToPreds[Key].push_back(N); 13442 Preds.push_back(N); 13443 } 13444 13445 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, 13446 Loop &L) 13447 : SE(SE), L(L) {} 13448 13449 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { 13450 const SCEV *Expr = SE.getSCEV(V); 13451 RewriteEntry &Entry = RewriteMap[Expr]; 13452 13453 // If we already have an entry and the version matches, return it. 13454 if (Entry.second && Generation == Entry.first) 13455 return Entry.second; 13456 13457 // We found an entry but it's stale. Rewrite the stale entry 13458 // according to the current predicate. 
const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}

void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (auto I : Init.FlagsMap)
    FlagsMap.insert(I);
}

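// Example of the output produced by print() below (illustrative; the exact
// expressions depend on the predicates currently in force):
//
//   [PSE]  %iv = phi i8 [ 0, %entry ], [ %iv.next, %loop ]:
//     {0,+,1}<%loop>
//     --> {0,+,1}<nuw><nsw><%loop>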
void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}

// Match the mathematical pattern A - (A / B) * B, where A and B can be
// arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used
// for URem with constant power-of-2 second operands.
// Matching is not always easy, as A and B can be folded (imagine A is X / 2
// and B is 4; then A / B becomes X / 8).
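// Worked example (illustrative): for "%a urem 8", InstCombine produces
// "zext (trunc %a to i3) to iN", which the zext/trunc pattern below recovers
// with LHS = %a and RHS = 8 (i.e. 1 << 3); the general form matches the
// expansion (%a + (-1 * (%a /u %b) * %b)), yielding LHS = %a and RHS = %b.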
bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
                                const SCEV *&RHS) {
  // Try to match 'zext (trunc A to iB) to iY', which is used
  // for URem with constant power-of-2 second operands. Make sure the size of
  // the operand A matches the size of the whole expression.
  if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
    if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
      LHS = Trunc->getOperand();
      // For now, bail out if the type of the LHS is larger than the type of
      // the expression.
      if (getTypeSizeInBits(LHS->getType()) >
          getTypeSizeInBits(Expr->getType()))
        return false;
      if (LHS->getType() != Expr->getType())
        LHS = getZeroExtendExpr(LHS, Expr->getType());
      RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1)
                        << getTypeSizeInBits(Trunc->getType()));
      return true;
    }
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
  return false;
}

const SCEV *
ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
  SmallVector<BasicBlock*, 16> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Form an expression for the maximum exit count possible for this loop. We
  // merge the max and exact information to approximate a version of
  // getConstantMaxBackedgeTakenCount which isn't restricted to just constants.
  SmallVector<const SCEV*, 4> ExitCounts;
  for (BasicBlock *ExitingBB : ExitingBlocks) {
    const SCEV *ExitCount = getExitCount(L, ExitingBB);
    if (isa<SCEVCouldNotCompute>(ExitCount))
      ExitCount = getExitCount(L, ExitingBB,
                               ScalarEvolution::ConstantMaximum);
    if (!isa<SCEVCouldNotCompute>(ExitCount)) {
      assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
             "We should only have known counts for exiting blocks that "
             "dominate latch!");
      ExitCounts.push_back(ExitCount);
    }
  }
  if (ExitCounts.empty())
    return getCouldNotCompute();
  return getUMinFromMismatchedTypes(ExitCounts);
}

/// This rewriter is similar to SCEVParameterRewriter (it replaces SCEVUnknown
/// components following the Map (Value -> SCEV)), but skips AddRecExpr
/// because we cannot guarantee that the replacement is loop invariant in the
/// loop of the AddRec.
class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
  ValueToSCEVMapTy &Map;

public:
  SCEVLoopGuardRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M)
      : SCEVRewriteVisitor(SE), Map(M) {}

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    auto I = Map.find(Expr->getValue());
    if (I == Map.end())
      return Expr;
    return I->second;
  }
};

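// Illustrative examples of the effect of applyLoopGuards() below: if the
// loop is guarded by "%n != 0", an expression involving %n is rewritten so
// that the guard's information becomes explicit, e.g.
//   (zext i8 %n to i16)  -->  (zext i8 (1 umax %n) to i16)
// and a guard "%a urem %b == 0" lets %a be rewritten to ((%a /u %b) * %b).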
const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
  auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
                              const SCEV *RHS, ValueToSCEVMapTy &RewriteMap) {
    // If we have LHS == 0, check whether LHS computes a property of some
    // unknown SCEV %v that lets us rewrite %v explicitly.
    const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
    if (Predicate == CmpInst::ICMP_EQ && RHSC &&
        RHSC->getValue()->isNullValue()) {
      // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to
      // explicitly express that.
      const SCEV *URemLHS = nullptr;
      const SCEV *URemRHS = nullptr;
      if (matchURem(LHS, URemLHS, URemRHS)) {
        if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
          Value *V = LHSUnknown->getValue();
          auto Multiple =
              getMulExpr(getUDivExpr(URemLHS, URemRHS), URemRHS,
                         (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW));
          RewriteMap[V] = Multiple;
          return;
        }
      }
    }

    if (!isa<SCEVUnknown>(LHS) && isa<SCEVUnknown>(RHS)) {
      std::swap(LHS, RHS);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }

    // Check for a condition of the form (-C1 + X u< C2). InstCombine will
    // create this form when combining two checks of the form (X u< C2 + C1)
    // and (X u>= C1).
    auto MatchRangeCheckIdiom = [this, Predicate, LHS, RHS, &RewriteMap]() {
      auto *AddExpr = dyn_cast<SCEVAddExpr>(LHS);
      if (!AddExpr || AddExpr->getNumOperands() != 2)
        return false;

      auto *C1 = dyn_cast<SCEVConstant>(AddExpr->getOperand(0));
      auto *LHSUnknown = dyn_cast<SCEVUnknown>(AddExpr->getOperand(1));
      auto *C2 = dyn_cast<SCEVConstant>(RHS);
      if (!C1 || !C2 || !LHSUnknown)
        return false;

      auto ExactRegion =
          ConstantRange::makeExactICmpRegion(Predicate, C2->getAPInt())
              .sub(C1->getAPInt());

      // Bail out, unless we have a non-wrapping, monotonic range.
      if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
        return false;
      auto I = RewriteMap.find(LHSUnknown->getValue());
      const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHS;
      RewriteMap[LHSUnknown->getValue()] = getUMaxExpr(
          getConstant(ExactRegion.getUnsignedMin()),
          getUMinExpr(RewrittenLHS, getConstant(ExactRegion.getUnsignedMax())));
      return true;
    };
    if (MatchRangeCheckIdiom())
      return;

    // For now, limit to conditions that provide information about unknown
    // expressions. RHS also cannot contain add recurrences.
    auto *LHSUnknown = dyn_cast<SCEVUnknown>(LHS);
    if (!LHSUnknown || containsAddRecurrence(RHS))
      return;

    // Check whether LHS has already been rewritten. In that case we want to
    // chain further rewrites onto the already rewritten value.
    auto I = RewriteMap.find(LHSUnknown->getValue());
    const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHS;
    const SCEV *RewrittenRHS = nullptr;
    switch (Predicate) {
    case CmpInst::ICMP_ULT:
      RewrittenRHS =
          getUMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_SLT:
      RewrittenRHS =
          getSMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_ULE:
      RewrittenRHS = getUMinExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_SLE:
      RewrittenRHS = getSMinExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_UGT:
      RewrittenRHS =
          getUMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_SGT:
      RewrittenRHS =
          getSMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_UGE:
      RewrittenRHS = getUMaxExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_SGE:
      RewrittenRHS = getSMaxExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_EQ:
      if (isa<SCEVConstant>(RHS))
        RewrittenRHS = RHS;
      break;
    case CmpInst::ICMP_NE:
      if (isa<SCEVConstant>(RHS) &&
          cast<SCEVConstant>(RHS)->getValue()->isNullValue())
        RewrittenRHS = getUMaxExpr(RewrittenLHS, getOne(RHS->getType()));
      break;
    default:
      break;
    }

    if (RewrittenRHS)
      RewriteMap[LHSUnknown->getValue()] = RewrittenRHS;
  };
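  // Worked example for MatchRangeCheckIdiom above (illustrative): the guard
  // "(-5 + %x) u< 10" encodes 5 u<= %x u<= 14, since
  // makeExactICmpRegion(u<, 10) = [0, 10) shifted by +5 gives [5, 15), so %x
  // is rewritten to (5 umax (%x umin 14)).
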
  // Starting at the loop predecessor, climb up the predecessor chain, as long
  // as we can find predecessors that have a unique successor leading to the
  // original header.
  // TODO: share this logic with isLoopEntryGuardedByCond.
  ValueToSCEVMapTy RewriteMap;
  for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
           L->getLoopPredecessor(), L->getHeader());
       Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    const BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
      continue;

    bool EnterIfTrue = LoopEntryPredicate->getSuccessor(0) == Pair.second;
    SmallVector<Value *, 8> Worklist;
    SmallPtrSet<Value *, 8> Visited;
    Worklist.push_back(LoopEntryPredicate->getCondition());
    while (!Worklist.empty()) {
      Value *Cond = Worklist.pop_back_val();
      if (!Visited.insert(Cond).second)
        continue;

      if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
        auto Predicate =
            EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
        CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)),
                         getSCEV(Cmp->getOperand(1)), RewriteMap);
        continue;
      }

      Value *L, *R;
      if (EnterIfTrue ? match(Cond, m_LogicalAnd(m_Value(L), m_Value(R)))
                      : match(Cond, m_LogicalOr(m_Value(L), m_Value(R)))) {
        Worklist.push_back(L);
        Worklist.push_back(R);
      }
    }
  }

  // Also collect information from assumptions dominating the loop.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *AssumeI = cast<CallInst>(AssumeVH);
    auto *Cmp = dyn_cast<ICmpInst>(AssumeI->getOperand(0));
    if (!Cmp || !DT.dominates(AssumeI, L->getHeader()))
      continue;
    CollectCondition(Cmp->getPredicate(), getSCEV(Cmp->getOperand(0)),
                     getSCEV(Cmp->getOperand(1)), RewriteMap);
  }

  if (RewriteMap.empty())
    return Expr;
  SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
  return Rewriter.visit(Expr);
}