//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::ZeroOrMore,
                            cl::desc("Maximum number of iterations SCEV will "
                                     "symbolically execute a constant-"
                                     "derived loop"),
                            cl::init(100));
"symbolically execute a constant " 154 "derived loop"), 155 cl::init(100)); 156 157 // FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean. 158 static cl::opt<bool> VerifySCEV( 159 "verify-scev", cl::Hidden, 160 cl::desc("Verify ScalarEvolution's backedge taken counts (slow)")); 161 static cl::opt<bool> VerifySCEVStrict( 162 "verify-scev-strict", cl::Hidden, 163 cl::desc("Enable stricter verification with -verify-scev is passed")); 164 static cl::opt<bool> 165 VerifySCEVMap("verify-scev-maps", cl::Hidden, 166 cl::desc("Verify no dangling value in ScalarEvolution's " 167 "ExprValueMap (slow)")); 168 169 static cl::opt<bool> VerifyIR( 170 "scev-verify-ir", cl::Hidden, 171 cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), 172 cl::init(false)); 173 174 static cl::opt<unsigned> MulOpsInlineThreshold( 175 "scev-mulops-inline-threshold", cl::Hidden, 176 cl::desc("Threshold for inlining multiplication operands into a SCEV"), 177 cl::init(32)); 178 179 static cl::opt<unsigned> AddOpsInlineThreshold( 180 "scev-addops-inline-threshold", cl::Hidden, 181 cl::desc("Threshold for inlining addition operands into a SCEV"), 182 cl::init(500)); 183 184 static cl::opt<unsigned> MaxSCEVCompareDepth( 185 "scalar-evolution-max-scev-compare-depth", cl::Hidden, 186 cl::desc("Maximum depth of recursive SCEV complexity comparisons"), 187 cl::init(32)); 188 189 static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth( 190 "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, 191 cl::desc("Maximum depth of recursive SCEV operations implication analysis"), 192 cl::init(2)); 193 194 static cl::opt<unsigned> MaxValueCompareDepth( 195 "scalar-evolution-max-value-compare-depth", cl::Hidden, 196 cl::desc("Maximum depth of recursive value complexity comparisons"), 197 cl::init(2)); 198 199 static cl::opt<unsigned> 200 MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, 201 cl::desc("Maximum depth of recursive arithmetics"), 202 cl::init(32)); 203 204 static cl::opt<unsigned> MaxConstantEvolvingDepth( 205 "scalar-evolution-max-constant-evolving-depth", cl::Hidden, 206 cl::desc("Maximum depth of recursive constant evolving"), cl::init(32)); 207 208 static cl::opt<unsigned> 209 MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, 210 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), 211 cl::init(8)); 212 213 static cl::opt<unsigned> 214 MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, 215 cl::desc("Max coefficients in AddRec during evolving"), 216 cl::init(8)); 217 218 static cl::opt<unsigned> 219 HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, 220 cl::desc("Size of the expression which is considered huge"), 221 cl::init(4096)); 222 223 static cl::opt<bool> 224 ClassifyExpressions("scalar-evolution-classify-expressions", 225 cl::Hidden, cl::init(true), 226 cl::desc("When printing analysis, include information on every instruction")); 227 228 229 //===----------------------------------------------------------------------===// 230 // SCEV class definitions 231 //===----------------------------------------------------------------------===// 232 233 //===----------------------------------------------------------------------===// 234 // Implementation of the SCEV class. 
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}
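
// For illustration of the syntax produced above (not exhaustive): an affine
// recurrence that starts at 0 and steps by 4 in a loop headed by %loop
// prints as "{0,+,4}<nuw><nsw><%loop>", and n-ary expressions print fully
// parenthesized, e.g. "(4 * (zext i8 %n to i32))".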

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}
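
// The FoldingSet lookup above is what makes pointer equality of SCEVs sound:
// two getConstant calls for the same ConstantInt return the same object. For
// illustration, a hypothetical caller could rely on
//   SE.getConstant(Ty, 42) == SE.getConstant(Ty, 42)
// holding as a pointer comparison; the same uniquing pattern recurs in every
// expression builder below.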

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy, computeExpressionSize(op)), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}
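
// The three recognizers above match the classic "GEP from null" constant
// expression idioms. For illustration, isSizeOf accepts IR of the shape
//   ptrtoint (%T* getelementptr (%T, %T* null, i32 1) to i64)
// which evaluates to sizeof(%T): stepping one element past a null %T*
// yields the element size as an integer. isAlignOf and isOffsetOf match
// analogous struct-field GEPs from null.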

//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to be
// more efficient.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    EquivalenceClasses<const Value *> &EqCacheValue,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                  LC->getOperand(), RC->getOperand(), DT,
                                  Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT) <
           0;
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}
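
// A small worked example of the grouping above (illustrative only): starting
// from operands [%a, 42, %a], the stable sort by complexity yields
// [42, %a, %a] (constants order before unknowns by getSCEVType()), and the
// duplicate-grouping pass guarantees the two %a's end up adjacent. Callers
// such as the add/mul folders can then spot repeated operands with a single
// linear scan over neighboring elements.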

// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size = 0;

    FindSCEVSize() = default;

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }

    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}

/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // A simple case: when dividing N by 1, the quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }
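
  // Illustrative use of divide() (hypothetical caller): dividing the affine
  // recurrence {0,+,8} by the constant 4 reaches visitAddRecExpr below, which
  // divides the start and step separately and yields Quotient = {0,+,2},
  // Remainder = 0. Dividing {1,+,8} by 4 instead leaves Remainder = 1.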

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator for the following functions with empty implementation.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitSMinExpr(const SCEVSMinExpr *Numerator) {}
  void visitUMinExpr(const SCEVUMinExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }
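
  // A sketch of the parameter-rewriting fallback above (illustrative): with
  // Numerator = (4 * %a * %b) and Denominator = %a (a SCEVUnknown),
  // substituting %a := 0 yields Remainder = 0, and substituting %a := 1
  // yields Quotient = (4 * %b). The substitution views the numerator as a
  // polynomial in %a, so setting %a to 0 isolates its %a-free part.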

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}
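
// A worked instance of the scheme above (illustrative numbers): for K = 3 and
// W = 8, T = 1 (3! = 6 has one factor of two), OddFactorial = 3, and the
// multiplicative inverse of 3 modulo 2^8 is 171 (3 * 171 = 513 = 2*256 + 1).
// So BC(It, 3) is computed as trunc(It*(It-1)*(It-2) /u 2) * 171 at width 8.
// For It = 5: 5*4*3 = 60, 60/2 = 30, and 30*171 mod 256 = 10 == C(5,3).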

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}
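
// For illustration: the affine recurrence {A,+,B} evaluates to A + B*It, and
// {0,+,1,+,1} (the running sum 0, 1, 3, 6, ...) evaluates to
// 0 + 1*It + 1*It*(It-1)/2 = It*(It+1)/2, matching the closed form of the
// triangular numbers.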

//===----------------------------------------------------------------------===//
// SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVCastExpr>(CommOp->getOperand(i)) && isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked at the beginning that ID was not in the cache, the
    // recursive calls above may have inserted it in the meantime. If we find
    // it now, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}
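
// For illustration of the AddRec rule just above: truncating i64 {0,+,2} to
// i32 yields an i32 {0,+,2} with the wrap flags dropped (FlagAnyWrap), since
// a truncated recurrence may wrap even when the wide one provably did not.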

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}
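
// A concrete reading of the limits above (illustrative, i8): if Step is known
// to lie in [1, 3], the signed limit is INT8_MIN - 3, which wraps to 125, with
// Pred = SLT: any value s< 125 can absorb a step of at most 3 without signed
// overflow. The unsigned limit is 0 - umax(Step) = 253 with Pred = ULT, by
// the same wraparound argument.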

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling. This
// allows normalizing a sign/zero extended AddRec as such:
//   {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //
  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}
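
// For illustration of the normalization performed here: given
// AR = {(%n + 4),+,4} with step 4, PreStart is %n, and if any of the three
// checks above proves that %n + 4 cannot wrap in the relevant sense, then
// zext({(%n + 4),+,4}) is congruent with 4 + zext({%n,+,4}) (and similarly
// for sext), which is how getExtendAddRecStart below assembles its result.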
Therefore we only need 1556 // to check for (1) and (2). 1557 // 1558 // In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T 1559 // is `Delta` (defined below). 1560 template <typename ExtendOpTy> 1561 bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start, 1562 const SCEV *Step, 1563 const Loop *L) { 1564 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType; 1565 1566 // We restrict `Start` to a constant to prevent SCEV from spending too much 1567 // time here. It is correct (but more expensive) to continue with a 1568 // non-constant `Start` and do a general SCEV subtraction to compute 1569 // `PreStart` below. 1570 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start); 1571 if (!StartC) 1572 return false; 1573 1574 APInt StartAI = StartC->getAPInt(); 1575 1576 for (unsigned Delta : {-2, -1, 1, 2}) { 1577 const SCEV *PreStart = getConstant(StartAI - Delta); 1578 1579 FoldingSetNodeID ID; 1580 ID.AddInteger(scAddRecExpr); 1581 ID.AddPointer(PreStart); 1582 ID.AddPointer(Step); 1583 ID.AddPointer(L); 1584 void *IP = nullptr; 1585 const auto *PreAR = 1586 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 1587 1588 // Give up if we don't already have the add recurrence we need because 1589 // actually constructing an add recurrence is relatively expensive. 1590 if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2) 1591 const SCEV *DeltaS = getConstant(StartC->getType(), Delta); 1592 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; 1593 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep( 1594 DeltaS, &Pred, this); 1595 if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1) 1596 return true; 1597 } 1598 } 1599 1600 return false; 1601 } 1602 1603 // Finds an integer D for an expression (C + x + y + ...) such that the top 1604 // level addition in (D + (C - D + x + y + ...)) would not wrap (signed or 1605 // unsigned) and the number of trailing zeros of (C - D + x + y + ...) is 1606 // maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and 1607 // the (C + x + y + ...) expression is \p WholeAddExpr. 1608 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1609 const SCEVConstant *ConstantTerm, 1610 const SCEVAddExpr *WholeAddExpr) { 1611 const APInt C = ConstantTerm->getAPInt(); 1612 const unsigned BitWidth = C.getBitWidth(); 1613 // Find number of trailing zeros of (x + y + ...) w/o the C first: 1614 uint32_t TZ = BitWidth; 1615 for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I) 1616 TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I))); 1617 if (TZ) { 1618 // Set D to be as many least significant bits of C as possible while still 1619 // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap: 1620 return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C; 1621 } 1622 return APInt(BitWidth, 0); 1623 } 1624 1625 // Finds an integer D for an affine AddRec expression {C,+,x} such that the top 1626 // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the 1627 // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p 1628 // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count. 
1629 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1630 const APInt &ConstantStart, 1631 const SCEV *Step) { 1632 const unsigned BitWidth = ConstantStart.getBitWidth(); 1633 const uint32_t TZ = SE.GetMinTrailingZeros(Step); 1634 if (TZ) 1635 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth) 1636 : ConstantStart; 1637 return APInt(BitWidth, 0); 1638 } 1639 1640 const SCEV * 1641 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1642 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1643 "This is not an extending conversion!"); 1644 assert(isSCEVable(Ty) && 1645 "This is not a conversion to a SCEVable type!"); 1646 Ty = getEffectiveSCEVType(Ty); 1647 1648 // Fold if the operand is constant. 1649 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1650 return getConstant( 1651 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); 1652 1653 // zext(zext(x)) --> zext(x) 1654 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1655 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1656 1657 // Before doing any expensive analysis, check to see if we've already 1658 // computed a SCEV for this Op and Ty. 1659 FoldingSetNodeID ID; 1660 ID.AddInteger(scZeroExtend); 1661 ID.AddPointer(Op); 1662 ID.AddPointer(Ty); 1663 void *IP = nullptr; 1664 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1665 if (Depth > MaxCastDepth) { 1666 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1667 Op, Ty); 1668 UniqueSCEVs.InsertNode(S, IP); 1669 addToLoopUseLists(S); 1670 return S; 1671 } 1672 1673 // zext(trunc(x)) --> zext(x) or x or trunc(x) 1674 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1675 // It's possible the bits taken off by the truncate were all zero bits. If 1676 // so, we should be able to simplify this further. 1677 const SCEV *X = ST->getOperand(); 1678 ConstantRange CR = getUnsignedRange(X); 1679 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1680 unsigned NewBits = getTypeSizeInBits(Ty); 1681 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains( 1682 CR.zextOrTrunc(NewBits))) 1683 return getTruncateOrZeroExtend(X, Ty, Depth); 1684 } 1685 1686 // If the input value is a chrec scev, and we can prove that the value 1687 // did not overflow the old, smaller, value, we can zero extend all of the 1688 // operands (often constants). This allows analysis of something like 1689 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; } 1690 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1691 if (AR->isAffine()) { 1692 const SCEV *Start = AR->getStart(); 1693 const SCEV *Step = AR->getStepRecurrence(*this); 1694 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1695 const Loop *L = AR->getLoop(); 1696 1697 if (!AR->hasNoUnsignedWrap()) { 1698 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1699 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); 1700 } 1701 1702 // If we have special knowledge that this addrec won't overflow, 1703 // we don't need to do any further analysis. 1704 if (AR->hasNoUnsignedWrap()) 1705 return getAddRecExpr( 1706 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1707 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1708 1709 // Check whether the backedge-taken count is SCEVCouldNotCompute. 
1710       // Note that this serves two purposes: It filters out loops that are
1711       // simply not analyzable, and it covers the case where this code is
1712       // being called from within backedge-taken count analysis, such that
1713       // attempting to ask for the backedge-taken count would likely result
1714       // in infinite recursion. In the latter case, the analysis code will
1715       // cope with a conservative value, and it will take care to purge
1716       // that value once it has finished.
1717       const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1718       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1719         // Manually compute the final value for AR, checking for
1720         // overflow.
1721 
1722         // Check whether the backedge-taken count can be losslessly cast to
1723         // the addrec's type. The count is always unsigned.
1724         const SCEV *CastedMaxBECount =
1725             getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
1726         const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
1727             CastedMaxBECount, MaxBECount->getType(), Depth);
1728         if (MaxBECount == RecastedMaxBECount) {
1729           Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1730           // Check whether Start+Step*MaxBECount has no unsigned overflow.
1731           const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
1732                                         SCEV::FlagAnyWrap, Depth + 1);
1733           const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
1734                                                           SCEV::FlagAnyWrap,
1735                                                           Depth + 1),
1736                                                WideTy, Depth + 1);
1737           const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
1738           const SCEV *WideMaxBECount =
1739               getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1740           const SCEV *OperandExtendedAdd =
1741               getAddExpr(WideStart,
1742                          getMulExpr(WideMaxBECount,
1743                                     getZeroExtendExpr(Step, WideTy, Depth + 1),
1744                                     SCEV::FlagAnyWrap, Depth + 1),
1745                          SCEV::FlagAnyWrap, Depth + 1);
1746           if (ZAdd == OperandExtendedAdd) {
1747             // Cache knowledge of AR NUW, which is propagated to this AddRec.
1748             const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1749             // Return the expression with the addrec on the outside.
1750             return getAddRecExpr(
1751                 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1752                                                          Depth + 1),
1753                 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1754                 AR->getNoWrapFlags());
1755           }
1756           // Similar to above, only this time treat the step value as signed.
1757           // This covers loops that count down.
1758           OperandExtendedAdd =
1759               getAddExpr(WideStart,
1760                          getMulExpr(WideMaxBECount,
1761                                     getSignExtendExpr(Step, WideTy, Depth + 1),
1762                                     SCEV::FlagAnyWrap, Depth + 1),
1763                          SCEV::FlagAnyWrap, Depth + 1);
1764           if (ZAdd == OperandExtendedAdd) {
1765             // Cache knowledge of AR NW, which is propagated to this AddRec.
1766             // Negative step causes unsigned wrap, but it still can't self-wrap.
1767             const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1768             // Return the expression with the addrec on the outside.
1769             return getAddRecExpr(
1770                 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1771                                                          Depth + 1),
1772                 getSignExtendExpr(Step, Ty, Depth + 1), L,
1773                 AR->getNoWrapFlags());
1774           }
1775         }
1776       }
1777 
1778       // Normally, in the cases we can prove no-overflow via a
1779       // backedge guarding condition, we can also compute a backedge
1780       // taken count for the loop. The exceptions are assumptions and
1781       // guards present in the loop -- SCEV is not great at exploiting
1782       // these to compute max backedge taken counts, but can still use
1783       // these to prove lack of overflow. Use this fact to avoid
1784       // doing extra work that may not pay off.
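      // For illustration (hypothetical values, not from the original source):
      // for an i8 AddRec {0,+,1}, getUnsignedRangeMax(Step) == 1 and the limit
      // N below evaluates to 0 - 1 == 255; if the backedge is guarded by
      // AR <u 255, then AR + 1 <= 255 on every iteration, so the recurrence
      // cannot wrap unsigned and can be marked <nuw>.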
1785 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1786 !AC.assumptions().empty()) { 1787 // If the backedge is guarded by a comparison with the pre-inc 1788 // value the addrec is safe. Also, if the entry is guarded by 1789 // a comparison with the start value and the backedge is 1790 // guarded by a comparison with the post-inc value, the addrec 1791 // is safe. 1792 if (isKnownPositive(Step)) { 1793 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 1794 getUnsignedRangeMax(Step)); 1795 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 1796 isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) { 1797 // Cache knowledge of AR NUW, which is propagated to this 1798 // AddRec. 1799 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1800 // Return the expression with the addrec on the outside. 1801 return getAddRecExpr( 1802 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1803 Depth + 1), 1804 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1805 AR->getNoWrapFlags()); 1806 } 1807 } else if (isKnownNegative(Step)) { 1808 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1809 getSignedRangeMin(Step)); 1810 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1811 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) { 1812 // Cache knowledge of AR NW, which is propagated to this 1813 // AddRec. Negative step causes unsigned wrap, but it 1814 // still can't self-wrap. 1815 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1816 // Return the expression with the addrec on the outside. 1817 return getAddRecExpr( 1818 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1819 Depth + 1), 1820 getSignExtendExpr(Step, Ty, Depth + 1), L, 1821 AR->getNoWrapFlags()); 1822 } 1823 } 1824 } 1825 1826 // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw> 1827 // if D + (C - D + Step * n) could be proven to not unsigned wrap 1828 // where D maximizes the number of trailing zeros of (C - D + Step * n) 1829 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { 1830 const APInt &C = SC->getAPInt(); 1831 const APInt &D = extractConstantWithoutWrapping(*this, C, Step); 1832 if (D != 0) { 1833 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1834 const SCEV *SResidual = 1835 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); 1836 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1837 return getAddExpr(SZExtD, SZExtR, 1838 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1839 Depth + 1); 1840 } 1841 } 1842 1843 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { 1844 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1845 return getAddRecExpr( 1846 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1847 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1848 } 1849 } 1850 1851 // zext(A % B) --> zext(A) % zext(B) 1852 { 1853 const SCEV *LHS; 1854 const SCEV *RHS; 1855 if (matchURem(Op, LHS, RHS)) 1856 return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1), 1857 getZeroExtendExpr(RHS, Ty, Depth + 1)); 1858 } 1859 1860 // zext(A / B) --> zext(A) / zext(B). 
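  // This holds because zero extension commutes with unsigned division: the
  // zero-extended operands represent the same unsigned values, so the
  // quotient is unchanged. For illustration (hypothetical values): in i8,
  // 200 /u 3 == 66, and after zext to i16, 200 /u 3 is still 66.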
1861   if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
1862     return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
1863                        getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));
1864 
1865   if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1866     // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
1867     if (SA->hasNoUnsignedWrap()) {
1868       // If the addition does not unsign overflow then we can, by definition,
1869       // commute the zero extension with the addition operation.
1870       SmallVector<const SCEV *, 4> Ops;
1871       for (const auto *Op : SA->operands())
1872         Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1873       return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
1874     }
1875 
1876     // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
1877     // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
1878     // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1879     //
1880     // Address arithmetic often contains expressions like
1881     // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
1882     // This transformation is useful while proving that such expressions are
1883     // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
1884     if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1885       const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
1886       if (D != 0) {
1887         const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1888         const SCEV *SResidual =
1889             getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
1890         const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1891         return getAddExpr(SZExtD, SZExtR,
1892                           (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1893                           Depth + 1);
1894       }
1895     }
1896   }
1897 
1898   if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
1899     // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
1900     if (SM->hasNoUnsignedWrap()) {
1901       // If the multiply does not unsign overflow then we can, by definition,
1902       // commute the zero extension with the multiply operation.
1903       SmallVector<const SCEV *, 4> Ops;
1904       for (const auto *Op : SM->operands())
1905         Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1906       return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
1907     }
1908 
1909     // zext(2^K * (trunc X to iN)) to iM ->
1910     //   2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
1911     //
1912     // Proof:
1913     //
1914     //     zext(2^K * (trunc X to iN)) to iM
1915     //   = zext((trunc X to iN) << K) to iM
1916     //   = zext((trunc X to i{N-K}) << K)<nuw> to iM
1917     //     (because shl removes the top K bits)
1918     //   = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
1919     //   = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
1920     //
1921     if (SM->getNumOperands() == 2)
1922       if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
1923         if (MulLHS->getAPInt().isPowerOf2())
1924           if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
1925             int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
1926                                MulLHS->getAPInt().logBase2();
1927             Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
1928             return getMulExpr(
1929                 getZeroExtendExpr(MulLHS, Ty),
1930                 getZeroExtendExpr(
1931                     getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
1932                 SCEV::FlagNUW, Depth + 1);
1933           }
1934   }
1935 
1936   // The cast wasn't folded; create an explicit cast node.
1937   // Recompute the insert position, as it may have been invalidated.
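  // (Note: the recursive getZeroExtendExpr/getAddExpr/getMulExpr calls above
  // may have inserted new nodes into UniqueSCEVs, so the insert position IP
  // computed by the earlier FindNodeOrInsertPos call can be stale.)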
1938 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1939 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1940 Op, Ty); 1941 UniqueSCEVs.InsertNode(S, IP); 1942 addToLoopUseLists(S); 1943 return S; 1944 } 1945 1946 const SCEV * 1947 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1948 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1949 "This is not an extending conversion!"); 1950 assert(isSCEVable(Ty) && 1951 "This is not a conversion to a SCEVable type!"); 1952 Ty = getEffectiveSCEVType(Ty); 1953 1954 // Fold if the operand is constant. 1955 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1956 return getConstant( 1957 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1958 1959 // sext(sext(x)) --> sext(x) 1960 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1961 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1962 1963 // sext(zext(x)) --> zext(x) 1964 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1965 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1966 1967 // Before doing any expensive analysis, check to see if we've already 1968 // computed a SCEV for this Op and Ty. 1969 FoldingSetNodeID ID; 1970 ID.AddInteger(scSignExtend); 1971 ID.AddPointer(Op); 1972 ID.AddPointer(Ty); 1973 void *IP = nullptr; 1974 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1975 // Limit recursion depth. 1976 if (Depth > MaxCastDepth) { 1977 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1978 Op, Ty); 1979 UniqueSCEVs.InsertNode(S, IP); 1980 addToLoopUseLists(S); 1981 return S; 1982 } 1983 1984 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1985 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1986 // It's possible the bits taken off by the truncate were all sign bits. If 1987 // so, we should be able to simplify this further. 1988 const SCEV *X = ST->getOperand(); 1989 ConstantRange CR = getSignedRange(X); 1990 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1991 unsigned NewBits = getTypeSizeInBits(Ty); 1992 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1993 CR.sextOrTrunc(NewBits))) 1994 return getTruncateOrSignExtend(X, Ty, Depth); 1995 } 1996 1997 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1998 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1999 if (SA->hasNoSignedWrap()) { 2000 // If the addition does not sign overflow then we can, by definition, 2001 // commute the sign extension with the addition operation. 2002 SmallVector<const SCEV *, 4> Ops; 2003 for (const auto *Op : SA->operands()) 2004 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); 2005 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); 2006 } 2007 2008 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...)) 2009 // if D + (C - D + x + y + ...) could be proven to not signed wrap 2010 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 
2011     //
2012     // For instance, this will bring two seemingly different expressions:
2013     //     1 + sext(5 + 20 * %x + 24 * %y)  and
2014     //         sext(6 + 20 * %x + 24 * %y)
2015     // to the same form:
2016     //     2 + sext(4 + 20 * %x + 24 * %y)
2017     if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
2018       const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
2019       if (D != 0) {
2020         const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
2021         const SCEV *SResidual =
2022             getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
2023         const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
2024         return getAddExpr(SSExtD, SSExtR,
2025                           (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
2026                           Depth + 1);
2027       }
2028     }
2029   }
2030   // If the input value is a chrec scev, and we can prove that the value
2031   // did not overflow the old, smaller, value, we can sign extend all of the
2032   // operands (often constants). This allows analysis of something like
2033   // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
2034   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
2035     if (AR->isAffine()) {
2036       const SCEV *Start = AR->getStart();
2037       const SCEV *Step = AR->getStepRecurrence(*this);
2038       unsigned BitWidth = getTypeSizeInBits(AR->getType());
2039       const Loop *L = AR->getLoop();
2040 
2041       if (!AR->hasNoSignedWrap()) {
2042         auto NewFlags = proveNoWrapViaConstantRanges(AR);
2043         const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
2044       }
2045 
2046       // If we have special knowledge that this addrec won't overflow,
2047       // we don't need to do any further analysis.
2048       if (AR->hasNoSignedWrap())
2049         return getAddRecExpr(
2050             getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2051             getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);
2052 
2053       // Check whether the backedge-taken count is SCEVCouldNotCompute.
2054       // Note that this serves two purposes: It filters out loops that are
2055       // simply not analyzable, and it covers the case where this code is
2056       // being called from within backedge-taken count analysis, such that
2057       // attempting to ask for the backedge-taken count would likely result
2058       // in infinite recursion. In the latter case, the analysis code will
2059       // cope with a conservative value, and it will take care to purge
2060       // that value once it has finished.
2061       const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
2062       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
2063         // Manually compute the final value for AR, checking for
2064         // overflow.
2065 
2066         // Check whether the backedge-taken count can be losslessly cast to
2067         // the addrec's type. The count is always unsigned.
2068         const SCEV *CastedMaxBECount =
2069             getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
2070         const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
2071             CastedMaxBECount, MaxBECount->getType(), Depth);
2072         if (MaxBECount == RecastedMaxBECount) {
2073           Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
2074           // Check whether Start+Step*MaxBECount has no signed overflow.
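          // For illustration (hypothetical numbers, not from the original
          // source): for an i8 AddRec {100,+,1} with MaxBECount == 50, the i8
          // sum Start + Step*MaxBECount wraps to -106, while the same sum
          // computed in the doubled-width type i16 is 150; SAdd and
          // OperandExtendedAdd then differ, and the fold is rejected.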
2075 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step, 2076 SCEV::FlagAnyWrap, Depth + 1); 2077 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul, 2078 SCEV::FlagAnyWrap, 2079 Depth + 1), 2080 WideTy, Depth + 1); 2081 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1); 2082 const SCEV *WideMaxBECount = 2083 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); 2084 const SCEV *OperandExtendedAdd = 2085 getAddExpr(WideStart, 2086 getMulExpr(WideMaxBECount, 2087 getSignExtendExpr(Step, WideTy, Depth + 1), 2088 SCEV::FlagAnyWrap, Depth + 1), 2089 SCEV::FlagAnyWrap, Depth + 1); 2090 if (SAdd == OperandExtendedAdd) { 2091 // Cache knowledge of AR NSW, which is propagated to this AddRec. 2092 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 2093 // Return the expression with the addrec on the outside. 2094 return getAddRecExpr( 2095 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 2096 Depth + 1), 2097 getSignExtendExpr(Step, Ty, Depth + 1), L, 2098 AR->getNoWrapFlags()); 2099 } 2100 // Similar to above, only this time treat the step value as unsigned. 2101 // This covers loops that count up with an unsigned step. 2102 OperandExtendedAdd = 2103 getAddExpr(WideStart, 2104 getMulExpr(WideMaxBECount, 2105 getZeroExtendExpr(Step, WideTy, Depth + 1), 2106 SCEV::FlagAnyWrap, Depth + 1), 2107 SCEV::FlagAnyWrap, Depth + 1); 2108 if (SAdd == OperandExtendedAdd) { 2109 // If AR wraps around then 2110 // 2111 // abs(Step) * MaxBECount > unsigned-max(AR->getType()) 2112 // => SAdd != OperandExtendedAdd 2113 // 2114 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> 2115 // (SAdd == OperandExtendedAdd => AR is NW) 2116 2117 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 2118 2119 // Return the expression with the addrec on the outside. 2120 return getAddRecExpr( 2121 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 2122 Depth + 1), 2123 getZeroExtendExpr(Step, Ty, Depth + 1), L, 2124 AR->getNoWrapFlags()); 2125 } 2126 } 2127 } 2128 2129 // Normally, in the cases we can prove no-overflow via a 2130 // backedge guarding condition, we can also compute a backedge 2131 // taken count for the loop. The exceptions are assumptions and 2132 // guards present in the loop -- SCEV is not great at exploiting 2133 // these to compute max backedge taken counts, but can still use 2134 // these to prove lack of overflow. Use this fact to avoid 2135 // doing extra work that may not pay off. 2136 2137 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 2138 !AC.assumptions().empty()) { 2139 // If the backedge is guarded by a comparison with the pre-inc 2140 // value the addrec is safe. Also, if the entry is guarded by 2141 // a comparison with the start value and the backedge is 2142 // guarded by a comparison with the post-inc value, the addrec 2143 // is safe. 2144 ICmpInst::Predicate Pred; 2145 const SCEV *OverflowLimit = 2146 getSignedOverflowLimitForStep(Step, &Pred, this); 2147 if (OverflowLimit && 2148 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 2149 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) { 2150 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec. 
2151         const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
2152         return getAddRecExpr(
2153             getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2154             getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2155       }
2156     }
2157 
2158     // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
2159     // if D + (C - D + Step * n) could be proven to not signed wrap
2160     // where D maximizes the number of trailing zeros of (C - D + Step * n)
2161     if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
2162       const APInt &C = SC->getAPInt();
2163       const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
2164       if (D != 0) {
2165         const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
2166         const SCEV *SResidual =
2167             getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
2168         const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
2169         return getAddExpr(SSExtD, SSExtR,
2170                           (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
2171                           Depth + 1);
2172       }
2173     }
2174 
2175     if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2176       const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
2177       return getAddRecExpr(
2178           getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2179           getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2180     }
2181   }
2182 
2183   // If the input value is provably positive and we could not simplify
2184   // away the sext, build a zext instead.
2185   if (isKnownNonNegative(Op))
2186     return getZeroExtendExpr(Op, Ty, Depth + 1);
2187 
2188   // The cast wasn't folded; create an explicit cast node.
2189   // Recompute the insert position, as it may have been invalidated.
2190   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2191   SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
2192                                                    Op, Ty);
2193   UniqueSCEVs.InsertNode(S, IP);
2194   addToLoopUseLists(S);
2195   return S;
2196 }
2197 
2198 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
2199 /// unspecified bits out to the given type.
2200 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
2201                                               Type *Ty) {
2202   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
2203          "This is not an extending conversion!");
2204   assert(isSCEVable(Ty) &&
2205          "This is not a conversion to a SCEVable type!");
2206   Ty = getEffectiveSCEVType(Ty);
2207 
2208   // Sign-extend negative constants.
2209   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
2210     if (SC->getAPInt().isNegative())
2211       return getSignExtendExpr(Op, Ty);
2212 
2213   // Peel off a truncate cast.
2214   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
2215     const SCEV *NewOp = T->getOperand();
2216     if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
2217       return getAnyExtendExpr(NewOp, Ty);
2218     return getTruncateOrNoop(NewOp, Ty);
2219   }
2220 
2221   // Next try a zext cast. If the cast is folded, use it.
2222   const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
2223   if (!isa<SCEVZeroExtendExpr>(ZExt))
2224     return ZExt;
2225 
2226   // Next try a sext cast. If the cast is folded, use it.
2227   const SCEV *SExt = getSignExtendExpr(Op, Ty);
2228   if (!isa<SCEVSignExtendExpr>(SExt))
2229     return SExt;
2230 
2231   // Force the cast to be folded into the operands of an addrec.
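  // For illustration (hypothetical values): getAnyExtendExpr({0,+,1}<i8>, i32)
  // produces {0,+,1}<i32> carrying (at least) the no-self-wrap flag; only <nw>
  // is asserted here because the high bits produced by an "any" extension are
  // unspecified.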
2232   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
2233     SmallVector<const SCEV *, 4> Ops;
2234     for (const SCEV *Op : AR->operands())
2235       Ops.push_back(getAnyExtendExpr(Op, Ty));
2236     return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
2237   }
2238 
2239   // If the expression is obviously signed, use the sext cast value.
2240   if (isa<SCEVSMaxExpr>(Op))
2241     return SExt;
2242 
2243   // Absent any other information, use the zext cast value.
2244   return ZExt;
2245 }
2246 
2247 /// Process the given Ops list, which is a list of operands to be added under
2248 /// the given scale, and update the given map. This is a helper function for
2249 /// getAddExpr. As an example of what it does, given a sequence of operands
2250 /// that would form an add expression like this:
2251 ///
2252 ///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
2253 ///
2254 /// where A and B are constants, update the map with these values:
2255 ///
2256 ///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
2257 ///
2258 /// and add 13 + A*B*29 to AccumulatedConstant.
2259 /// This will allow getAddExpr to produce this:
2260 ///
2261 ///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
2262 ///
2263 /// This form often exposes folding opportunities that are hidden in
2264 /// the original operand list.
2265 ///
2266 /// Return true iff it appears that any interesting folding opportunities
2267 /// may be exposed. This helps getAddExpr short-circuit extra work in
2268 /// the common case where no interesting opportunities are present, and
2269 /// is also used as a check to avoid infinite recursion.
2270 static bool
2271 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
2272                              SmallVectorImpl<const SCEV *> &NewOps,
2273                              APInt &AccumulatedConstant,
2274                              const SCEV *const *Ops, size_t NumOperands,
2275                              const APInt &Scale,
2276                              ScalarEvolution &SE) {
2277   bool Interesting = false;
2278 
2279   // Iterate over the add operands. They are sorted, with constants first.
2280   unsigned i = 0;
2281   while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2282     ++i;
2283     // Pull a buried constant out to the outside.
2284     if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
2285       Interesting = true;
2286     AccumulatedConstant += Scale * C->getAPInt();
2287   }
2288 
2289   // Next comes everything else. We're especially interested in multiplies
2290   // here, but they're in the middle, so just visit the rest with one loop.
2291   for (; i != NumOperands; ++i) {
2292     const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
2293     if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
2294       APInt NewScale =
2295           Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
2296       if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
2297         // A multiplication of a constant with another add; recurse.
2298         const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
2299         Interesting |=
2300             CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2301                                          Add->op_begin(), Add->getNumOperands(),
2302                                          NewScale, SE);
2303       } else {
2304         // A multiplication of a constant with some other value. Update
2305         // the map.
2306         SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
2307         const SCEV *Key = SE.getMulExpr(MulOps);
2308         auto Pair = M.insert({Key, NewScale});
2309         if (Pair.second) {
2310           NewOps.push_back(Pair.first->first);
2311         } else {
2312           Pair.first->second += NewScale;
2313           // The map already had an entry for this value, which may indicate
2314           // a folding opportunity.
2315           Interesting = true;
2316         }
2317       }
2318     } else {
2319       // An ordinary operand. Update the map.
2320       std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
2321           M.insert({Ops[i], Scale});
2322       if (Pair.second) {
2323         NewOps.push_back(Pair.first->first);
2324       } else {
2325         Pair.first->second += Scale;
2326         // The map already had an entry for this value, which may indicate
2327         // a folding opportunity.
2328         Interesting = true;
2329       }
2330     }
2331   }
2332 
2333   return Interesting;
2334 }
2335 
2336 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and
2337 // `Flags' as can't-wrap behavior. Infer a more aggressive set of
2338 // can't-overflow flags for the operation if possible.
2339 static SCEV::NoWrapFlags
2340 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
2341                       const ArrayRef<const SCEV *> Ops,
2342                       SCEV::NoWrapFlags Flags) {
2343   using namespace std::placeholders;
2344 
2345   using OBO = OverflowingBinaryOperator;
2346 
2347   bool CanAnalyze =
2348       Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
2349   (void)CanAnalyze;
2350   assert(CanAnalyze && "don't call from other places!");
2351 
2352   int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
2353   SCEV::NoWrapFlags SignOrUnsignWrap =
2354       ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2355 
2356   // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
2357   auto IsKnownNonNegative = [&](const SCEV *S) {
2358     return SE->isKnownNonNegative(S);
2359   };
2360 
2361   if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
2362     Flags =
2363         ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
2364 
2365   SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2366 
2367   if (SignOrUnsignWrap != SignOrUnsignMask &&
2368       (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
2369       isa<SCEVConstant>(Ops[0])) {
2370 
2371     auto Opcode = [&] {
2372       switch (Type) {
2373       case scAddExpr:
2374         return Instruction::Add;
2375       case scMulExpr:
2376         return Instruction::Mul;
2377       default:
2378         llvm_unreachable("Unexpected SCEV op.");
2379       }
2380     }();
2381 
2382     const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
2383 
2384     // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
2385     if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
2386       auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2387           Opcode, C, OBO::NoSignedWrap);
2388       if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
2389         Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2390     }
2391 
2392     // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
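    // For illustration (hypothetical i8 values): with Opcode == Add and
    // C == 1, the guaranteed-no-unsigned-wrap region is [0, 255), so if the
    // unsigned range of Ops[1] is contained in [0, 255) then A + 1 cannot
    // wrap and FlagNUW may be set.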
2393     if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
2394       auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2395           Opcode, C, OBO::NoUnsignedWrap);
2396       if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
2397         Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2398     }
2399   }
2400 
2401   return Flags;
2402 }
2403 
2404 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
2405   return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
2406 }
2407 
2408 /// Get a canonical add expression, or something simpler if possible.
2409 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2410                                         SCEV::NoWrapFlags Flags,
2411                                         unsigned Depth) {
2412   assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
2413          "only nuw or nsw allowed");
2414   assert(!Ops.empty() && "Cannot get empty add!");
2415   if (Ops.size() == 1) return Ops[0];
2416 #ifndef NDEBUG
2417   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2418   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2419     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2420            "SCEVAddExpr operand types don't match!");
2421 #endif
2422 
2423   // Sort by complexity; this groups all similar expression types together.
2424   GroupByComplexity(Ops, &LI, DT);
2425 
2426   Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);
2427 
2428   // If there are any constants, fold them together.
2429   unsigned Idx = 0;
2430   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2431     ++Idx;
2432     assert(Idx < Ops.size());
2433     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2434       // We found two constants, fold them together!
2435       Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2436       if (Ops.size() == 2) return Ops[0];
2437       Ops.erase(Ops.begin()+1);  // Erase the folded element
2438       LHSC = cast<SCEVConstant>(Ops[0]);
2439     }
2440 
2441     // If we are left with a constant zero being added, strip it off.
2442     if (LHSC->getValue()->isZero()) {
2443       Ops.erase(Ops.begin());
2444       --Idx;
2445     }
2446 
2447     if (Ops.size() == 1) return Ops[0];
2448   }
2449 
2450   // Limit recursion depth.
2451   if (Depth > MaxArithDepth || hasHugeExpression(Ops))
2452     return getOrCreateAddExpr(Ops, Flags);
2453 
2454   if (SCEV *S = std::get<0>(findExistingSCEVInCache(scAddExpr, Ops))) {
2455     static_cast<SCEVAddExpr *>(S)->setNoWrapFlags(Flags);
2456     return S;
2457   }
2458 
2459   // Okay, check to see if the same value occurs in the operand list more than
2460   // once. If so, merge them together into a multiply expression. Since we
2461   // sorted the list, these values are required to be adjacent.
2462   Type *Ty = Ops[0]->getType();
2463   bool FoundMatch = false;
2464   for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2465     if (Ops[i] == Ops[i+1]) {  // X + Y + Y  -->  X + Y*2
2466       // Scan ahead to count how many equal operands there are.
2467       unsigned Count = 2;
2468       while (i+Count != e && Ops[i+Count] == Ops[i])
2469         ++Count;
2470       // Merge the values into a multiply.
2471       const SCEV *Scale = getConstant(Ty, Count);
2472       const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
2473       if (Ops.size() == Count)
2474         return Mul;
2475       Ops[i] = Mul;
2476       Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2477       --i; e -= Count - 1;
2478       FoundMatch = true;
2479     }
2480   if (FoundMatch)
2481     return getAddExpr(Ops, Flags, Depth + 1);
2482 
2483   // Check for truncates. If all the operands are truncated from the same
2484   // type, see if factoring out the truncate would permit the result to be
2485   // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(ext(n)*x + ext(m)*y),
2486   // if the contents of the resulting outer trunc fold to something simple.
2487   auto FindTruncSrcType = [&]() -> Type * {
2488     // We're ultimately looking to fold an addrec of truncs and muls of only
2489     // constants and truncs, so if we find any other types of SCEV
2490     // as operands of the addrec then we bail and return nullptr here.
2491     // Otherwise, we return the type of the operand of a trunc that we find.
2492     if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
2493       return T->getOperand()->getType();
2494     if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2495       const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
2496       if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
2497         return T->getOperand()->getType();
2498     }
2499     return nullptr;
2500   };
2501   if (auto *SrcType = FindTruncSrcType()) {
2502     SmallVector<const SCEV *, 8> LargeOps;
2503     bool Ok = true;
2504     // Check all the operands to see if they can be represented in the
2505     // source type of the truncate.
2506     for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
2507       if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
2508         if (T->getOperand()->getType() != SrcType) {
2509           Ok = false;
2510           break;
2511         }
2512         LargeOps.push_back(T->getOperand());
2513       } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2514         LargeOps.push_back(getAnyExtendExpr(C, SrcType));
2515       } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2516         SmallVector<const SCEV *, 8> LargeMulOps;
2517         for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2518           if (const SCEVTruncateExpr *T =
2519                   dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2520             if (T->getOperand()->getType() != SrcType) {
2521               Ok = false;
2522               break;
2523             }
2524             LargeMulOps.push_back(T->getOperand());
2525           } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2526             LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
2527           } else {
2528             Ok = false;
2529             break;
2530           }
2531         }
2532         if (Ok)
2533           LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
2534       } else {
2535         Ok = false;
2536         break;
2537       }
2538     }
2539     if (Ok) {
2540       // Evaluate the expression in the larger type.
2541       const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
2542       // If it folds to something simple, use it. Otherwise, don't.
2543       if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2544         return getTruncateExpr(Fold, Ty);
2545     }
2546   }
2547 
2548   // Skip past any other cast SCEVs.
2549   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2550     ++Idx;
2551 
2552   // If there are add operands they would be next.
2553   if (Idx < Ops.size()) {
2554     bool DeletedAdd = false;
2555     while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2556       if (Ops.size() > AddOpsInlineThreshold ||
2557           Add->getNumOperands() > AddOpsInlineThreshold)
2558         break;
2559       // If we have an add, expand the add operands onto the end of the operands
2560       // list.
2561       Ops.erase(Ops.begin()+Idx);
2562       Ops.append(Add->op_begin(), Add->op_end());
2563       DeletedAdd = true;
2564     }
2565 
2566     // If we deleted at least one add, we added operands to the end of the list,
2567     // and they are not necessarily sorted. Recurse to resort and resimplify
2568     // any operands we just acquired.
2569     if (DeletedAdd)
2570       return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2571   }
2572 
2573   // Skip over the add expression until we get to a multiply.
2574   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2575     ++Idx;
2576 
2577   // Check to see if there are any folding opportunities present with
2578   // operands multiplied by constant values.
2579   if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2580     uint64_t BitWidth = getTypeSizeInBits(Ty);
2581     DenseMap<const SCEV *, APInt> M;
2582     SmallVector<const SCEV *, 8> NewOps;
2583     APInt AccumulatedConstant(BitWidth, 0);
2584     if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2585                                      Ops.data(), Ops.size(),
2586                                      APInt(BitWidth, 1), *this)) {
2587       struct APIntCompare {
2588         bool operator()(const APInt &LHS, const APInt &RHS) const {
2589           return LHS.ult(RHS);
2590         }
2591       };
2592 
2593       // Some interesting folding opportunity is present, so it's worthwhile to
2594       // re-generate the operands list. Group the operands by constant scale,
2595       // to avoid multiplying by the same constant scale multiple times.
2596       std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2597       for (const SCEV *NewOp : NewOps)
2598         MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2599       // Re-generate the operands list.
2600       Ops.clear();
2601       if (AccumulatedConstant != 0)
2602         Ops.push_back(getConstant(AccumulatedConstant));
2603       for (auto &MulOp : MulOpLists)
2604         if (MulOp.first != 0)
2605           Ops.push_back(getMulExpr(
2606               getConstant(MulOp.first),
2607               getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
2608               SCEV::FlagAnyWrap, Depth + 1));
2609       if (Ops.empty())
2610         return getZero(Ty);
2611       if (Ops.size() == 1)
2612         return Ops[0];
2613       return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2614     }
2615   }
2616 
2617   // If we are adding something to a multiply expression, make sure the
2618   // something is not already an operand of the multiply. If so, merge it into
2619   // the multiply.
2620   for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2621     const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2622     for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2623       const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2624       if (isa<SCEVConstant>(MulOpSCEV))
2625         continue;
2626       for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2627         if (MulOpSCEV == Ops[AddOp]) {
2628           // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
2629           const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2630           if (Mul->getNumOperands() != 2) {
2631             // If the multiply has more than two operands, we must get the
2632             // Y*Z term.
2633             SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2634                                                 Mul->op_begin()+MulOp);
2635             MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2636             InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2637           }
2638           SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
2639           const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2640           const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
2641                                             SCEV::FlagAnyWrap, Depth + 1);
2642           if (Ops.size() == 2) return OuterMul;
2643           if (AddOp < Idx) {
2644             Ops.erase(Ops.begin()+AddOp);
2645             Ops.erase(Ops.begin()+Idx-1);
2646           } else {
2647             Ops.erase(Ops.begin()+Idx);
2648             Ops.erase(Ops.begin()+AddOp-1);
2649           }
2650           Ops.push_back(OuterMul);
2651           return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2652         }
2653 
2654       // Check this multiply against other multiplies being added together.
2655 for (unsigned OtherMulIdx = Idx+1; 2656 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2657 ++OtherMulIdx) { 2658 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2659 // If MulOp occurs in OtherMul, we can fold the two multiplies 2660 // together. 2661 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2662 OMulOp != e; ++OMulOp) 2663 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2664 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2665 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2666 if (Mul->getNumOperands() != 2) { 2667 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2668 Mul->op_begin()+MulOp); 2669 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2670 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2671 } 2672 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2673 if (OtherMul->getNumOperands() != 2) { 2674 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2675 OtherMul->op_begin()+OMulOp); 2676 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2677 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2678 } 2679 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2680 const SCEV *InnerMulSum = 2681 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2682 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2683 SCEV::FlagAnyWrap, Depth + 1); 2684 if (Ops.size() == 2) return OuterMul; 2685 Ops.erase(Ops.begin()+Idx); 2686 Ops.erase(Ops.begin()+OtherMulIdx-1); 2687 Ops.push_back(OuterMul); 2688 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2689 } 2690 } 2691 } 2692 } 2693 2694 // If there are any add recurrences in the operands list, see if any other 2695 // added values are loop invariant. If so, we can fold them into the 2696 // recurrence. 2697 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2698 ++Idx; 2699 2700 // Scan over all recurrences, trying to fold loop invariants into them. 2701 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2702 // Scan all of the other operands to this add and add them to the vector if 2703 // they are loop invariant w.r.t. the recurrence. 2704 SmallVector<const SCEV *, 8> LIOps; 2705 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2706 const Loop *AddRecLoop = AddRec->getLoop(); 2707 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2708 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2709 LIOps.push_back(Ops[i]); 2710 Ops.erase(Ops.begin()+i); 2711 --i; --e; 2712 } 2713 2714 // If we found some loop invariants, fold them into the recurrence. 2715 if (!LIOps.empty()) { 2716 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2717 LIOps.push_back(AddRec->getStart()); 2718 2719 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2720 AddRec->op_end()); 2721 // This follows from the fact that the no-wrap flags on the outer add 2722 // expression are applicable on the 0th iteration, when the add recurrence 2723 // will be equal to its start value. 2724 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1); 2725 2726 // Build the new addrec. Propagate the NUW and NSW flags if both the 2727 // outer add and the inner addrec are guaranteed to have no overflow. 2728 // Always propagate NW. 2729 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); 2730 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); 2731 2732 // If all of the other operands were loop invariant, we are done. 
2733       if (Ops.size() == 1) return NewRec;
2734 
2735       // Otherwise, add the folded AddRec to the non-invariant parts.
2736       for (unsigned i = 0;; ++i)
2737         if (Ops[i] == AddRec) {
2738           Ops[i] = NewRec;
2739           break;
2740         }
2741       return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2742     }
2743 
2744     // Okay, if there weren't any loop invariants to be folded, check to see if
2745     // there are multiple AddRec's with the same loop induction variable being
2746     // added together. If so, we can fold them.
2747     for (unsigned OtherIdx = Idx+1;
2748          OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2749          ++OtherIdx) {
2750       // We expect the AddRecExpr's to be sorted in reverse dominance order,
2751       // so that the 1st found AddRecExpr is dominated by all others.
2752       assert(DT.dominates(
2753                  cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2754                  AddRec->getLoop()->getHeader()) &&
2755              "AddRecExprs are not sorted in reverse dominance order?");
2756       if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2757         // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
2758         SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
2759                                                AddRec->op_end());
2760         for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2761              ++OtherIdx) {
2762           const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2763           if (OtherAddRec->getLoop() == AddRecLoop) {
2764             for (unsigned i = 0, e = OtherAddRec->getNumOperands();
2765                  i != e; ++i) {
2766               if (i >= AddRecOps.size()) {
2767                 AddRecOps.append(OtherAddRec->op_begin()+i,
2768                                  OtherAddRec->op_end());
2769                 break;
2770               }
2771               SmallVector<const SCEV *, 2> TwoOps = {
2772                   AddRecOps[i], OtherAddRec->getOperand(i)};
2773               AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2774             }
2775             Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2776           }
2777         }
2778         // Step size has changed, so we cannot guarantee no self-wraparound.
2779         Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
2780         return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2781       }
2782     }
2783 
2784     // Otherwise couldn't fold anything into this recurrence. Move on to the
2785     // next one.
2786   }
2787 
2788   // Okay, it looks like we really DO need an add expr. Check to see if we
2789   // already have one, otherwise create a new one.
2790   return getOrCreateAddExpr(Ops, Flags);
2791 }
2792 
2793 const SCEV *
2794 ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
2795                                     SCEV::NoWrapFlags Flags) {
2796   FoldingSetNodeID ID;
2797   ID.AddInteger(scAddExpr);
2798   for (const SCEV *Op : Ops)
2799     ID.AddPointer(Op);
2800   void *IP = nullptr;
2801   SCEVAddExpr *S =
2802       static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2803   if (!S) {
2804     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2805     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2806     S = new (SCEVAllocator)
2807         SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
2808     UniqueSCEVs.InsertNode(S, IP);
2809     addToLoopUseLists(S);
2810   }
2811   S->setNoWrapFlags(Flags);
2812   return S;
2813 }
2814 
2815 const SCEV *
2816 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
2817                                        const Loop *L, SCEV::NoWrapFlags Flags) {
2818   FoldingSetNodeID ID;
2819   ID.AddInteger(scAddRecExpr);
2820   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2821     ID.AddPointer(Ops[i]);
2822   ID.AddPointer(L);
2823   void *IP = nullptr;
2824   SCEVAddRecExpr *S =
2825       static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2826   if (!S) {
2827     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2828     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2829     S = new (SCEVAllocator)
2830         SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
2831     UniqueSCEVs.InsertNode(S, IP);
2832     addToLoopUseLists(S);
2833   }
2834   S->setNoWrapFlags(Flags);
2835   return S;
2836 }
2837 
2838 const SCEV *
2839 ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
2840                                     SCEV::NoWrapFlags Flags) {
2841   FoldingSetNodeID ID;
2842   ID.AddInteger(scMulExpr);
2843   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2844     ID.AddPointer(Ops[i]);
2845   void *IP = nullptr;
2846   SCEVMulExpr *S =
2847       static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2848   if (!S) {
2849     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2850     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2851     S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2852                                         O, Ops.size());
2853     UniqueSCEVs.InsertNode(S, IP);
2854     addToLoopUseLists(S);
2855   }
2856   S->setNoWrapFlags(Flags);
2857   return S;
2858 }
2859 
2860 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
2861   uint64_t k = i*j;
2862   if (j > 1 && k / j != i) Overflow = true;
2863   return k;
2864 }
2865 
2866 /// Compute the result of "n choose k", the binomial coefficient. If an
2867 /// intermediate computation overflows, Overflow will be set and the return will
2868 /// be garbage. Overflow is not cleared on absence of overflow.
2869 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2870   // We use the multiplicative formula:
2871   //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2872   // At each iteration i, we multiply by the i-th factor of the numerator and
2873   // then divide by i. This division will always produce an
2874   // integral result, and helps reduce the chance of overflow in the
2875   // intermediate computations. However, we can still overflow even when the
2876   // final result would fit.
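  // For illustration (hypothetical inputs): Choose(5, 2) performs two
  // iterations: r = 1*5 = 5, r /= 1; then r = 5*4 = 20, r /= 2, yielding 10,
  // which matches C(5,2) = 10 with no overflow.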
2877 
2878   if (n == 0 || n == k) return 1;
2879   if (k > n) return 0;
2880 
2881   if (k > n/2)
2882     k = n-k;
2883 
2884   uint64_t r = 1;
2885   for (uint64_t i = 1; i <= k; ++i) {
2886     r = umul_ov(r, n-(i-1), Overflow);
2887     r /= i;
2888   }
2889   return r;
2890 }
2891 
2892 /// Determine if any of the operands in this SCEV are a constant or if
2893 /// any of the add or multiply expressions in this SCEV contain a constant.
2894 static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
2895   struct FindConstantInAddMulChain {
2896     bool FoundConstant = false;
2897 
2898     bool follow(const SCEV *S) {
2899       FoundConstant |= isa<SCEVConstant>(S);
2900       return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
2901     }
2902 
2903     bool isDone() const {
2904       return FoundConstant;
2905     }
2906   };
2907 
2908   FindConstantInAddMulChain F;
2909   SCEVTraversal<FindConstantInAddMulChain> ST(F);
2910   ST.visitAll(StartExpr);
2911   return F.FoundConstant;
2912 }
2913 
2914 /// Get a canonical multiply expression, or something simpler if possible.
2915 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
2916                                         SCEV::NoWrapFlags Flags,
2917                                         unsigned Depth) {
2918   assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
2919          "only nuw or nsw allowed");
2920   assert(!Ops.empty() && "Cannot get empty mul!");
2921   if (Ops.size() == 1) return Ops[0];
2922 #ifndef NDEBUG
2923   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2924   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2925     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2926            "SCEVMulExpr operand types don't match!");
2927 #endif
2928 
2929   // Sort by complexity; this groups all similar expression types together.
2930   GroupByComplexity(Ops, &LI, DT);
2931 
2932   Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);
2933 
2934   // Limit recursion depth, but fold all-constant expressions.
2935   // `Ops` is sorted, so it's enough to check just the last one.
2936   if ((Depth > MaxArithDepth || hasHugeExpression(Ops)) &&
2937       !isa<SCEVConstant>(Ops.back()))
2938     return getOrCreateMulExpr(Ops, Flags);
2939 
2940   if (SCEV *S = std::get<0>(findExistingSCEVInCache(scMulExpr, Ops))) {
2941     static_cast<SCEVMulExpr *>(S)->setNoWrapFlags(Flags);
2942     return S;
2943   }
2944 
2945   // If there are any constants, fold them together.
2946   unsigned Idx = 0;
2947   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2948 
2949     if (Ops.size() == 2)
2950       // C1*(C2+V) -> C1*C2 + C1*V
2951       if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
2952         // If any of Add's ops are Adds or Muls with a constant, apply this
2953         // transformation as well.
2954         //
2955         // TODO: There are some cases where this transformation is not
2956         // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
2957         // this transformation should be narrowed down.
2958         if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add))
2959           return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
2960                                        SCEV::FlagAnyWrap, Depth + 1),
2961                             getMulExpr(LHSC, Add->getOperand(1),
2962                                        SCEV::FlagAnyWrap, Depth + 1),
2963                             SCEV::FlagAnyWrap, Depth + 1);
2964 
2965     ++Idx;
2966     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2967       // We found two constants, fold them together!
      ConstantInt *Fold =
          ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    } else if (Ops[0]->isAllOnesValue()) {
      // If we have a mul by -1 of an add, try distributing the -1 among the
      // add operands.
      if (Ops.size() == 2) {
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          for (const SCEV *AddOp : Add->operands()) {
            const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
                                         Depth + 1);
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
        } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
          // Negation preserves a recurrence's no self-wrap property.
          SmallVector<const SCEV *, 4> Operands;
          for (const SCEV *AddRecOp : AddRec->operands())
            Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
                                          Depth + 1));

          return getAddRecExpr(Operands, AddRec->getLoop(),
                               AddRec->getNoWrapFlags(SCEV::FlagNW));
        }
      }
    }

    if (Ops.size() == 1)
      return Ops[0];
  }

  // Skip over the add expressions until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands, inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      if (Ops.size() > MulOpsInlineThreshold)
        break;
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Mul->op_begin(), Mul->op_end());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted. Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant. If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
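    // For example (illustrative, not from the original source): multiplying
    // the loop-invariant %n into {%a,+,%s}<L> yields {%n * %a,+,%n * %s}<L>;
    // the code below collects %n into LIOps and builds that scaled
    // recurrence as NewRec.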
3049 SmallVector<const SCEV *, 8> LIOps; 3050 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 3051 const Loop *AddRecLoop = AddRec->getLoop(); 3052 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3053 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 3054 LIOps.push_back(Ops[i]); 3055 Ops.erase(Ops.begin()+i); 3056 --i; --e; 3057 } 3058 3059 // If we found some loop invariants, fold them into the recurrence. 3060 if (!LIOps.empty()) { 3061 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 3062 SmallVector<const SCEV *, 4> NewOps; 3063 NewOps.reserve(AddRec->getNumOperands()); 3064 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); 3065 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 3066 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), 3067 SCEV::FlagAnyWrap, Depth + 1)); 3068 3069 // Build the new addrec. Propagate the NUW and NSW flags if both the 3070 // outer mul and the inner addrec are guaranteed to have no overflow. 3071 // 3072 // No self-wrap cannot be guaranteed after changing the step size, but 3073 // will be inferred if either NUW or NSW is true. 3074 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW)); 3075 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags); 3076 3077 // If all of the other operands were loop invariant, we are done. 3078 if (Ops.size() == 1) return NewRec; 3079 3080 // Otherwise, multiply the folded AddRec by the non-invariant parts. 3081 for (unsigned i = 0;; ++i) 3082 if (Ops[i] == AddRec) { 3083 Ops[i] = NewRec; 3084 break; 3085 } 3086 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 3087 } 3088 3089 // Okay, if there weren't any loop invariants to be folded, check to see 3090 // if there are multiple AddRec's with the same loop induction variable 3091 // being multiplied together. If so, we can fold them. 3092 3093 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> 3094 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ 3095 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z 3096 // ]]],+,...up to x=2n}. 3097 // Note that the arguments to choose() are always integers with values 3098 // known at compile time, never SCEV objects. 3099 // 3100 // The implementation avoids pointless extra computations when the two 3101 // addrec's are of different length (mathematically, it's equivalent to 3102 // an infinite stream of zeros on the right). 3103 bool OpsModified = false; 3104 for (unsigned OtherIdx = Idx+1; 3105 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 3106 ++OtherIdx) { 3107 const SCEVAddRecExpr *OtherAddRec = 3108 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); 3109 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) 3110 continue; 3111 3112 // Limit max number of arguments to avoid creation of unreasonably big 3113 // SCEVAddRecs with very complex operands. 
      if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
          MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
        continue;

      bool Overflow = false;
      Type *Ty = AddRec->getType();
      bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
      SmallVector<const SCEV*, 7> AddRecOps;
      for (int x = 0, xe = AddRec->getNumOperands() +
             OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
        SmallVector <const SCEV *, 7> SumOps;
        for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
          uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
          for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
                 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
               z < ze && !Overflow; ++z) {
            uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
            uint64_t Coeff;
            if (LargerThan64Bits)
              Coeff = umul_ov(Coeff1, Coeff2, Overflow);
            else
              Coeff = Coeff1*Coeff2;
            const SCEV *CoeffTerm = getConstant(Ty, Coeff);
            const SCEV *Term1 = AddRec->getOperand(y-z);
            const SCEV *Term2 = OtherAddRec->getOperand(z);
            SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
                                        SCEV::FlagAnyWrap, Depth + 1));
          }
        }
        if (SumOps.empty())
          SumOps.push_back(getZero(Ty));
        AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
      }
      if (!Overflow) {
        const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
                                              SCEV::FlagAnyWrap);
        if (Ops.size() == 2) return NewAddRec;
        Ops[Idx] = NewAddRec;
        Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
        OpsModified = true;
        AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
        if (!AddRec)
          break;
      }
    }
    if (OpsModified)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);

    // Otherwise couldn't fold anything into this recurrence. Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateMulExpr(Ops, Flags);
}

/// Represents an unsigned remainder expression based on unsigned division.
const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVURemExpr operand types don't match!");

  // Short-circuit easy cases.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    // If the constant is one, the result is trivial.
    if (RHSC->getValue()->isOne())
      return getZero(LHS->getType()); // X urem 1 --> 0

    // If the constant is a power of two, fold into a zext(trunc(LHS)).
    if (RHSC->getAPInt().isPowerOf2()) {
      Type *FullTy = LHS->getType();
      Type *TruncTy =
          IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
      return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
    }
  }

  // Fall back to the general lowering:
  // %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y).
  const SCEV *UDiv = getUDivExpr(LHS, RHS);
  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible.
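/// For instance (illustrative): X udiv 1 returns X directly, and
/// {0,+,4}<L> udiv 2 can fold to {0,+,2}<L> below, because the step divides
/// evenly and the zero-extension comparison proves no bits are dropped.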
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->isOne())
      return LHS;                               // X udiv 1 --> x
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of the
      // LHS expression.
      // TODO: Generalize this to non-constants by using known-bits information.
      Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getAPInt().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getAPInt().isPowerOf2())
        ++MaxShiftAmt;
      IntegerType *ExtTy =
          IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
                dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
          // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
          const APInt &StepInt = Step->getAPInt();
          const APInt &DivInt = RHSC->getAPInt();
          if (!StepInt.urem(DivInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            SmallVector<const SCEV *, 4> Operands;
            for (const SCEV *Op : AR->operands())
              Operands.push_back(getUDivExpr(Op, RHS));
            return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
          }
          // Get a canonical UDivExpr for a recurrence:
          // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
          // We can currently only fold X%N if X is constant.
          const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
          if (StartC && !DivInt.urem(StepInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            const APInt &StartInt = StartC->getAPInt();
            const APInt &StartRem = StartInt.urem(StepInt);
            if (StartRem != 0) {
              const SCEV *NewLHS =
                  getAddRecExpr(getConstant(StartInt - StartRem), Step,
                                AR->getLoop(), SCEV::FlagNW);
              if (LHS != NewLHS) {
                LHS = NewLHS;

                // Reset the ID to include the new LHS, and check if it is
                // already cached.
                ID.clear();
                ID.AddInteger(scUDivExpr);
                ID.AddPointer(LHS);
                ID.AddPointer(RHS);
                IP = nullptr;
                if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
                  return S;
              }
            }
          }
        }
      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
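      // e.g. (illustrative) (%x * 8) /u 4 can become %x * 2 once the
      // zero-extension comparison below proves the multiply cannot have
      // carried bits into the extended width.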
3282 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3283 SmallVector<const SCEV *, 4> Operands; 3284 for (const SCEV *Op : M->operands()) 3285 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3286 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3287 // Find an operand that's safely divisible. 3288 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3289 const SCEV *Op = M->getOperand(i); 3290 const SCEV *Div = getUDivExpr(Op, RHSC); 3291 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3292 Operands = SmallVector<const SCEV *, 4>(M->op_begin(), 3293 M->op_end()); 3294 Operands[i] = Div; 3295 return getMulExpr(Operands); 3296 } 3297 } 3298 } 3299 3300 // (A/B)/C --> A/(B*C) if safe and B*C can be folded. 3301 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { 3302 if (auto *DivisorConstant = 3303 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { 3304 bool Overflow = false; 3305 APInt NewRHS = 3306 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); 3307 if (Overflow) { 3308 return getConstant(RHSC->getType(), 0, false); 3309 } 3310 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); 3311 } 3312 } 3313 3314 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3315 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3316 SmallVector<const SCEV *, 4> Operands; 3317 for (const SCEV *Op : A->operands()) 3318 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3319 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3320 Operands.clear(); 3321 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3322 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3323 if (isa<SCEVUDivExpr>(Op) || 3324 getMulExpr(Op, RHS) != A->getOperand(i)) 3325 break; 3326 Operands.push_back(Op); 3327 } 3328 if (Operands.size() == A->getNumOperands()) 3329 return getAddExpr(Operands); 3330 } 3331 } 3332 3333 // Fold if both operands are constant. 3334 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3335 Constant *LHSCV = LHSC->getValue(); 3336 Constant *RHSCV = RHSC->getValue(); 3337 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3338 RHSCV))); 3339 } 3340 } 3341 } 3342 3343 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs 3344 // changes). Make sure we get a new one. 3345 IP = nullptr; 3346 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3347 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3348 LHS, RHS); 3349 UniqueSCEVs.InsertNode(S, IP); 3350 addToLoopUseLists(S); 3351 return S; 3352 } 3353 3354 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3355 APInt A = C1->getAPInt().abs(); 3356 APInt B = C2->getAPInt().abs(); 3357 uint32_t ABW = A.getBitWidth(); 3358 uint32_t BBW = B.getBitWidth(); 3359 3360 if (ABW > BBW) 3361 B = B.zext(ABW); 3362 else if (ABW < BBW) 3363 A = A.zext(BBW); 3364 3365 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3366 } 3367 3368 /// Get a canonical unsigned division expression, or something simpler if 3369 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3370 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3371 /// it's not exact because the udiv may be clearing bits. 
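/// For example (illustrative): if %n is known to be (4 * %m)<nuw>, then
/// getUDivExactExpr(%n, 4) can fold to %m, whereas a plain getUDivExpr must
/// generally keep the division around.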
3372 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3373 const SCEV *RHS) { 3374 // TODO: we could try to find factors in all sorts of things, but for now we 3375 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 3376 // end of this file for inspiration. 3377 3378 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 3379 if (!Mul || !Mul->hasNoUnsignedWrap()) 3380 return getUDivExpr(LHS, RHS); 3381 3382 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 3383 // If the mulexpr multiplies by a constant, then that constant must be the 3384 // first element of the mulexpr. 3385 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3386 if (LHSCst == RHSCst) { 3387 SmallVector<const SCEV *, 2> Operands; 3388 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3389 return getMulExpr(Operands); 3390 } 3391 3392 // We can't just assume that LHSCst divides RHSCst cleanly, it could be 3393 // that there's a factor provided by one of the other terms. We need to 3394 // check. 3395 APInt Factor = gcd(LHSCst, RHSCst); 3396 if (!Factor.isIntN(1)) { 3397 LHSCst = 3398 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor))); 3399 RHSCst = 3400 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor))); 3401 SmallVector<const SCEV *, 2> Operands; 3402 Operands.push_back(LHSCst); 3403 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3404 LHS = getMulExpr(Operands); 3405 RHS = RHSCst; 3406 Mul = dyn_cast<SCEVMulExpr>(LHS); 3407 if (!Mul) 3408 return getUDivExactExpr(LHS, RHS); 3409 } 3410 } 3411 } 3412 3413 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { 3414 if (Mul->getOperand(i) == RHS) { 3415 SmallVector<const SCEV *, 2> Operands; 3416 Operands.append(Mul->op_begin(), Mul->op_begin() + i); 3417 Operands.append(Mul->op_begin() + i + 1, Mul->op_end()); 3418 return getMulExpr(Operands); 3419 } 3420 } 3421 3422 return getUDivExpr(LHS, RHS); 3423 } 3424 3425 /// Get an add recurrence expression for the specified loop. Simplify the 3426 /// expression as much as possible. 3427 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step, 3428 const Loop *L, 3429 SCEV::NoWrapFlags Flags) { 3430 SmallVector<const SCEV *, 4> Operands; 3431 Operands.push_back(Start); 3432 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) 3433 if (StepChrec->getLoop() == L) { 3434 Operands.append(StepChrec->op_begin(), StepChrec->op_end()); 3435 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW)); 3436 } 3437 3438 Operands.push_back(Step); 3439 return getAddRecExpr(Operands, L, Flags); 3440 } 3441 3442 /// Get an add recurrence expression for the specified loop. Simplify the 3443 /// expression as much as possible. 
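/// For instance (illustrative): the operand list {X, 0} for loop L folds to
/// plain X below, since a zero step means the "recurrence" never changes.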
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
  }

  // It's tempting to want to call getConstantMaxBackedgeTakenCount here and
  // use that information to infer NUW and NSW flags. However, computing a
  // BE count requires calling getAddRecExpr, so we may not yet have a
  // meaningful BE count at this point (and if we don't, we'd be stuck
  // with a SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
            : (!NestedLoop->contains(L) &&
               DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to their
      // loops. Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = all_of(
          Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });

      if (AllInvariant) {
        // Create a recurrence for the outer loop with the same step size.
        //
        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
        // inner recurrence has the same property.
        SCEV::NoWrapFlags OuterFlags =
            maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());

        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return isLoopInvariant(Op, NestedLoop);
        });

        if (AllInvariant) {
          // Ok, both add recurrences are valid after the transformation.
          //
          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
          // the outer recurrence has the same property.
          SCEV::NoWrapFlags InnerFlags =
              maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
        }
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr. Check to see if we
  // already have one, otherwise create a new one.
3517 return getOrCreateAddRecExpr(Operands, L, Flags); 3518 } 3519 3520 const SCEV * 3521 ScalarEvolution::getGEPExpr(GEPOperator *GEP, 3522 const SmallVectorImpl<const SCEV *> &IndexExprs) { 3523 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); 3524 // getSCEV(Base)->getType() has the same address space as Base->getType() 3525 // because SCEV::getType() preserves the address space. 3526 Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType()); 3527 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP 3528 // instruction to its SCEV, because the Instruction may be guarded by control 3529 // flow and the no-overflow bits may not be valid for the expression in any 3530 // context. This can be fixed similarly to how these flags are handled for 3531 // adds. 3532 SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW 3533 : SCEV::FlagAnyWrap; 3534 3535 const SCEV *TotalOffset = getZero(IntIdxTy); 3536 Type *CurTy = GEP->getType(); 3537 bool FirstIter = true; 3538 for (const SCEV *IndexExpr : IndexExprs) { 3539 // Compute the (potentially symbolic) offset in bytes for this index. 3540 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3541 // For a struct, add the member offset. 3542 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3543 unsigned FieldNo = Index->getZExtValue(); 3544 const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo); 3545 3546 // Add the field offset to the running total offset. 3547 TotalOffset = getAddExpr(TotalOffset, FieldOffset); 3548 3549 // Update CurTy to the type of the field at Index. 3550 CurTy = STy->getTypeAtIndex(Index); 3551 } else { 3552 // Update CurTy to its element type. 3553 if (FirstIter) { 3554 assert(isa<PointerType>(CurTy) && 3555 "The first index of a GEP indexes a pointer"); 3556 CurTy = GEP->getSourceElementType(); 3557 FirstIter = false; 3558 } else { 3559 CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0); 3560 } 3561 // For an array, add the element offset, explicitly scaled. 3562 const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy); 3563 // Getelementptr indices are signed. 3564 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy); 3565 3566 // Multiply the index by the element size to compute the element offset. 3567 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap); 3568 3569 // Add the element offset to the running total offset. 3570 TotalOffset = getAddExpr(TotalOffset, LocalOffset); 3571 } 3572 } 3573 3574 // Add the total offset from all the GEP indices to the base. 
3575 return getAddExpr(BaseExpr, TotalOffset, Wrap); 3576 } 3577 3578 std::tuple<SCEV *, FoldingSetNodeID, void *> 3579 ScalarEvolution::findExistingSCEVInCache(int SCEVType, 3580 ArrayRef<const SCEV *> Ops) { 3581 FoldingSetNodeID ID; 3582 void *IP = nullptr; 3583 ID.AddInteger(SCEVType); 3584 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3585 ID.AddPointer(Ops[i]); 3586 return std::tuple<SCEV *, FoldingSetNodeID, void *>( 3587 UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP); 3588 } 3589 3590 const SCEV *ScalarEvolution::getMinMaxExpr(unsigned Kind, 3591 SmallVectorImpl<const SCEV *> &Ops) { 3592 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); 3593 if (Ops.size() == 1) return Ops[0]; 3594 #ifndef NDEBUG 3595 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3596 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3597 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3598 "Operand types don't match!"); 3599 #endif 3600 3601 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr; 3602 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr; 3603 3604 // Sort by complexity, this groups all similar expression types together. 3605 GroupByComplexity(Ops, &LI, DT); 3606 3607 // Check if we have created the same expression before. 3608 if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) { 3609 return S; 3610 } 3611 3612 // If there are any constants, fold them together. 3613 unsigned Idx = 0; 3614 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3615 ++Idx; 3616 assert(Idx < Ops.size()); 3617 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { 3618 if (Kind == scSMaxExpr) 3619 return APIntOps::smax(LHS, RHS); 3620 else if (Kind == scSMinExpr) 3621 return APIntOps::smin(LHS, RHS); 3622 else if (Kind == scUMaxExpr) 3623 return APIntOps::umax(LHS, RHS); 3624 else if (Kind == scUMinExpr) 3625 return APIntOps::umin(LHS, RHS); 3626 llvm_unreachable("Unknown SCEV min/max opcode"); 3627 }; 3628 3629 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3630 // We found two constants, fold them together! 3631 ConstantInt *Fold = ConstantInt::get( 3632 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); 3633 Ops[0] = getConstant(Fold); 3634 Ops.erase(Ops.begin()+1); // Erase the folded element 3635 if (Ops.size() == 1) return Ops[0]; 3636 LHSC = cast<SCEVConstant>(Ops[0]); 3637 } 3638 3639 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); 3640 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); 3641 3642 if (IsMax ? IsMinV : IsMaxV) { 3643 // If we are left with a constant minimum(/maximum)-int, strip it off. 3644 Ops.erase(Ops.begin()); 3645 --Idx; 3646 } else if (IsMax ? IsMaxV : IsMinV) { 3647 // If we have a max(/min) with a constant maximum(/minimum)-int, 3648 // it will always be the extremum. 3649 return LHSC; 3650 } 3651 3652 if (Ops.size() == 1) return Ops[0]; 3653 } 3654 3655 // Find the first operation of the same kind 3656 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) 3657 ++Idx; 3658 3659 // Check to see if one of the operands is of the same kind. If so, expand its 3660 // operands onto our operand list, and recurse to simplify. 
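  // e.g. (illustrative) umax(%a, umax(%b, %c)) flattens to the single n-ary
  // expression umax(%a, %b, %c) before any further simplification.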
3661 if (Idx < Ops.size()) { 3662 bool DeletedAny = false; 3663 while (Ops[Idx]->getSCEVType() == Kind) { 3664 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]); 3665 Ops.erase(Ops.begin()+Idx); 3666 Ops.append(SMME->op_begin(), SMME->op_end()); 3667 DeletedAny = true; 3668 } 3669 3670 if (DeletedAny) 3671 return getMinMaxExpr(Kind, Ops); 3672 } 3673 3674 // Okay, check to see if the same value occurs in the operand list twice. If 3675 // so, delete one. Since we sorted the list, these values are required to 3676 // be adjacent. 3677 llvm::CmpInst::Predicate GEPred = 3678 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; 3679 llvm::CmpInst::Predicate LEPred = 3680 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 3681 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred; 3682 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred; 3683 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) { 3684 if (Ops[i] == Ops[i + 1] || 3685 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) { 3686 // X op Y op Y --> X op Y 3687 // X op Y --> X, if we know X, Y are ordered appropriately 3688 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); 3689 --i; 3690 --e; 3691 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i], 3692 Ops[i + 1])) { 3693 // X op Y --> Y, if we know X, Y are ordered appropriately 3694 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); 3695 --i; 3696 --e; 3697 } 3698 } 3699 3700 if (Ops.size() == 1) return Ops[0]; 3701 3702 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3703 3704 // Okay, it looks like we really DO need an expr. Check to see if we 3705 // already have one, otherwise create a new one. 3706 const SCEV *ExistingSCEV; 3707 FoldingSetNodeID ID; 3708 void *IP; 3709 std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops); 3710 if (ExistingSCEV) 3711 return ExistingSCEV; 3712 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3713 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3714 SCEV *S = new (SCEVAllocator) SCEVMinMaxExpr( 3715 ID.Intern(SCEVAllocator), static_cast<SCEVTypes>(Kind), O, Ops.size()); 3716 3717 UniqueSCEVs.InsertNode(S, IP); 3718 addToLoopUseLists(S); 3719 return S; 3720 } 3721 3722 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) { 3723 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3724 return getSMaxExpr(Ops); 3725 } 3726 3727 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3728 return getMinMaxExpr(scSMaxExpr, Ops); 3729 } 3730 3731 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) { 3732 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3733 return getUMaxExpr(Ops); 3734 } 3735 3736 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3737 return getMinMaxExpr(scUMaxExpr, Ops); 3738 } 3739 3740 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, 3741 const SCEV *RHS) { 3742 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 3743 return getSMinExpr(Ops); 3744 } 3745 3746 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) { 3747 return getMinMaxExpr(scSMinExpr, Ops); 3748 } 3749 3750 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, 3751 const SCEV *RHS) { 3752 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 3753 return getUMinExpr(Ops); 3754 } 3755 3756 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) { 3757 return getMinMaxExpr(scUMinExpr, Ops); 3758 } 3759 3760 const SCEV 
*ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  if (isa<ScalableVectorType>(AllocTy)) {
    Constant *NullPtr = Constant::getNullValue(AllocTy->getPointerTo());
    Constant *One = ConstantInt::get(IntTy, 1);
    Constant *GEP = ConstantExpr::getGetElementPtr(AllocTy, NullPtr, One);
    return getSCEV(ConstantExpr::getPtrToInt(GEP, IntTy));
  }
  return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}

const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
                                             StructType *STy,
                                             unsigned FieldNo) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(
      IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here. createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// Test if values of the given type are analyzable within the SCEV
/// framework. This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntOrPtrTy();
}

/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  if (Ty->isPointerTy())
    return getDataLayout().getIndexTypeSizeInBits(Ty);
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// Return a type with the same bitwidth as the given type and which represents
/// how SCEV will treat the given type, for which isSCEVable must return
/// true. For pointer types, this is the pointer index sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
3837 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); 3838 return getDataLayout().getIndexType(Ty); 3839 } 3840 3841 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const { 3842 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2; 3843 } 3844 3845 const SCEV *ScalarEvolution::getCouldNotCompute() { 3846 return CouldNotCompute.get(); 3847 } 3848 3849 bool ScalarEvolution::checkValidity(const SCEV *S) const { 3850 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) { 3851 auto *SU = dyn_cast<SCEVUnknown>(S); 3852 return SU && SU->getValue() == nullptr; 3853 }); 3854 3855 return !ContainsNulls; 3856 } 3857 3858 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { 3859 HasRecMapType::iterator I = HasRecMap.find(S); 3860 if (I != HasRecMap.end()) 3861 return I->second; 3862 3863 bool FoundAddRec = 3864 SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); }); 3865 HasRecMap.insert({S, FoundAddRec}); 3866 return FoundAddRec; 3867 } 3868 3869 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}. 3870 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an 3871 /// offset I, then return {S', I}, else return {\p S, nullptr}. 3872 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) { 3873 const auto *Add = dyn_cast<SCEVAddExpr>(S); 3874 if (!Add) 3875 return {S, nullptr}; 3876 3877 if (Add->getNumOperands() != 2) 3878 return {S, nullptr}; 3879 3880 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0)); 3881 if (!ConstOp) 3882 return {S, nullptr}; 3883 3884 return {Add->getOperand(1), ConstOp->getValue()}; 3885 } 3886 3887 /// Return the ValueOffsetPair set for \p S. \p S can be represented 3888 /// by the value and offset from any ValueOffsetPair in the set. 3889 SetVector<ScalarEvolution::ValueOffsetPair> * 3890 ScalarEvolution::getSCEVValues(const SCEV *S) { 3891 ExprValueMapType::iterator SI = ExprValueMap.find_as(S); 3892 if (SI == ExprValueMap.end()) 3893 return nullptr; 3894 #ifndef NDEBUG 3895 if (VerifySCEVMap) { 3896 // Check there is no dangling Value in the set returned. 3897 for (const auto &VE : SI->second) 3898 assert(ValueExprMap.count(VE.first)); 3899 } 3900 #endif 3901 return &SI->second; 3902 } 3903 3904 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V) 3905 /// cannot be used separately. eraseValueFromMap should be used to remove 3906 /// V from ValueExprMap and ExprValueMap at the same time. 3907 void ScalarEvolution::eraseValueFromMap(Value *V) { 3908 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3909 if (I != ValueExprMap.end()) { 3910 const SCEV *S = I->second; 3911 // Remove {V, 0} from the set of ExprValueMap[S] 3912 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S)) 3913 SV->remove({V, nullptr}); 3914 3915 // Remove {V, Offset} from the set of ExprValueMap[Stripped] 3916 const SCEV *Stripped; 3917 ConstantInt *Offset; 3918 std::tie(Stripped, Offset) = splitAddExpr(S); 3919 if (Offset != nullptr) { 3920 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped)) 3921 SV->remove({V, Offset}); 3922 } 3923 ValueExprMap.erase(V); 3924 } 3925 } 3926 3927 /// Check whether value has nuw/nsw/exact set but SCEV does not. 3928 /// TODO: In reality it is better to check the poison recursively 3929 /// but this is better than nothing. 
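/// For example (illustrative): for "%a = add nsw i32 %x, %y", if the SCEV
/// built for %a is an add expression without the nsw flag, the instruction's
/// poison guarantee has been lost, and getSCEV below declines to record %a
/// as an expansion candidate for that SCEV.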
static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (isa<OverflowingBinaryOperator>(I)) {
      if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
        if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
          return true;
        if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
          return true;
      }
    } else if (isa<PossiblyExactOperator>(I) && I->isExact())
      return true;
  }
  return false;
}

/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so we need to double-check whether V->S was inserted into
    // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
      ExprValueMap[S].insert({V, nullptr});

      // If S == Stripped + Offset, add Stripped -> {V, Offset} into
      // ExprValueMap.
      const SCEV *Stripped = S;
      ConstantInt *Offset = nullptr;
      std::tie(Stripped, Offset) = splitAddExpr(S);
      // If Stripped is a SCEVUnknown, don't bother to save
      // Stripped -> {V, offset}. It doesn't simplify and sometimes even
      // increases the complexity of the expansion code.
      // If V is a GetElementPtrInst, don't save Stripped -> {V, offset}
      // because it may generate add/sub instead of GEP in SCEV expansion.
3971 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && 3972 !isa<GetElementPtrInst>(V)) 3973 ExprValueMap[Stripped].insert({V, Offset}); 3974 } 3975 } 3976 return S; 3977 } 3978 3979 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 3980 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3981 3982 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3983 if (I != ValueExprMap.end()) { 3984 const SCEV *S = I->second; 3985 if (checkValidity(S)) 3986 return S; 3987 eraseValueFromMap(V); 3988 forgetMemoizedResults(S); 3989 } 3990 return nullptr; 3991 } 3992 3993 /// Return a SCEV corresponding to -V = -1*V 3994 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 3995 SCEV::NoWrapFlags Flags) { 3996 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3997 return getConstant( 3998 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 3999 4000 Type *Ty = V->getType(); 4001 Ty = getEffectiveSCEVType(Ty); 4002 return getMulExpr( 4003 V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags); 4004 } 4005 4006 /// If Expr computes ~A, return A else return nullptr 4007 static const SCEV *MatchNotExpr(const SCEV *Expr) { 4008 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 4009 if (!Add || Add->getNumOperands() != 2 || 4010 !Add->getOperand(0)->isAllOnesValue()) 4011 return nullptr; 4012 4013 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 4014 if (!AddRHS || AddRHS->getNumOperands() != 2 || 4015 !AddRHS->getOperand(0)->isAllOnesValue()) 4016 return nullptr; 4017 4018 return AddRHS->getOperand(1); 4019 } 4020 4021 /// Return a SCEV corresponding to ~V = -1-V 4022 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 4023 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 4024 return getConstant( 4025 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 4026 4027 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) 4028 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) { 4029 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { 4030 SmallVector<const SCEV *, 2> MatchedOperands; 4031 for (const SCEV *Operand : MME->operands()) { 4032 const SCEV *Matched = MatchNotExpr(Operand); 4033 if (!Matched) 4034 return (const SCEV *)nullptr; 4035 MatchedOperands.push_back(Matched); 4036 } 4037 return getMinMaxExpr( 4038 SCEVMinMaxExpr::negate(static_cast<SCEVTypes>(MME->getSCEVType())), 4039 MatchedOperands); 4040 }; 4041 if (const SCEV *Replaced = MatchMinMaxNegation(MME)) 4042 return Replaced; 4043 } 4044 4045 Type *Ty = V->getType(); 4046 Ty = getEffectiveSCEVType(Ty); 4047 const SCEV *AllOnes = 4048 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))); 4049 return getMinusSCEV(AllOnes, V); 4050 } 4051 4052 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 4053 SCEV::NoWrapFlags Flags, 4054 unsigned Depth) { 4055 // Fast path: X - X --> 0. 4056 if (LHS == RHS) 4057 return getZero(LHS->getType()); 4058 4059 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 4060 // makes it so that we cannot make much use of NUW. 4061 auto AddFlags = SCEV::FlagAnyWrap; 4062 const bool RHSIsNotMinSigned = 4063 !getSignedRangeMin(RHS).isMinSignedValue(); 4064 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { 4065 // Let M be the minimum representable signed value. Then (-1)*RHS 4066 // signed-wraps if and only if RHS is M. That can happen even for 4067 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 4068 // -1 - M does not. 
So to transfer NSW from LHS - RHS to LHS +
    // (-1)*RHS, we need to prove that RHS != M.
    //
    // If LHS is non-negative and we know that LHS - RHS does not
    // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
    // either by proving that RHS > M or that LHS >= 0.
    if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
      AddFlags = SCEV::FlagNSW;
    }
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
  // larger scope than intended.
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getZeroExtendExpr(V, Ty, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getSignExtendExpr(V, Ty, Depth);
}

const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type
*SrcTy = V->getType(); 4154 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 4155 "Cannot truncate or noop with non-integer arguments!"); 4156 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 4157 "getTruncateOrNoop cannot extend!"); 4158 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4159 return V; // No conversion 4160 return getTruncateExpr(V, Ty); 4161 } 4162 4163 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, 4164 const SCEV *RHS) { 4165 const SCEV *PromotedLHS = LHS; 4166 const SCEV *PromotedRHS = RHS; 4167 4168 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 4169 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 4170 else 4171 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 4172 4173 return getUMaxExpr(PromotedLHS, PromotedRHS); 4174 } 4175 4176 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 4177 const SCEV *RHS) { 4178 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 4179 return getUMinFromMismatchedTypes(Ops); 4180 } 4181 4182 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes( 4183 SmallVectorImpl<const SCEV *> &Ops) { 4184 assert(!Ops.empty() && "At least one operand must be!"); 4185 // Trivial case. 4186 if (Ops.size() == 1) 4187 return Ops[0]; 4188 4189 // Find the max type first. 4190 Type *MaxType = nullptr; 4191 for (auto *S : Ops) 4192 if (MaxType) 4193 MaxType = getWiderType(MaxType, S->getType()); 4194 else 4195 MaxType = S->getType(); 4196 4197 // Extend all ops to max type. 4198 SmallVector<const SCEV *, 2> PromotedOps; 4199 for (auto *S : Ops) 4200 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType)); 4201 4202 // Generate umin. 4203 return getUMinExpr(PromotedOps); 4204 } 4205 4206 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { 4207 // A pointer operand may evaluate to a nonpointer expression, such as null. 4208 if (!V->getType()->isPointerTy()) 4209 return V; 4210 4211 if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) { 4212 return getPointerBase(Cast->getOperand()); 4213 } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) { 4214 const SCEV *PtrOp = nullptr; 4215 for (const SCEV *NAryOp : NAry->operands()) { 4216 if (NAryOp->getType()->isPointerTy()) { 4217 // Cannot find the base of an expression with multiple pointer operands. 4218 if (PtrOp) 4219 return V; 4220 PtrOp = NAryOp; 4221 } 4222 } 4223 if (!PtrOp) 4224 return V; 4225 return getPointerBase(PtrOp); 4226 } 4227 return V; 4228 } 4229 4230 /// Push users of the given Instruction onto the given Worklist. 4231 static void 4232 PushDefUseChildren(Instruction *I, 4233 SmallVectorImpl<Instruction *> &Worklist) { 4234 // Push the def-use children onto the Worklist stack. 4235 for (User *U : I->users()) 4236 Worklist.push_back(cast<Instruction>(U)); 4237 } 4238 4239 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) { 4240 SmallVector<Instruction *, 16> Worklist; 4241 PushDefUseChildren(PN, Worklist); 4242 4243 SmallPtrSet<Instruction *, 8> Visited; 4244 Visited.insert(PN); 4245 while (!Worklist.empty()) { 4246 Instruction *I = Worklist.pop_back_val(); 4247 if (!Visited.insert(I).second) 4248 continue; 4249 4250 auto It = ValueExprMap.find_as(static_cast<Value *>(I)); 4251 if (It != ValueExprMap.end()) { 4252 const SCEV *Old = It->second; 4253 4254 // Short-circuit the def-use traversal if the symbolic name 4255 // ceases to appear in expressions. 
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(Old);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}

namespace {

/// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is L,
/// use its start expression. For AddRecs on other loops, keep the AddRec
/// itself if IgnoreOtherLoops is true; otherwise the rewrite cannot be done.
/// The rewrite also cannot be done if S contains a SCEVUnknown that is not
/// invariant in L.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool IgnoreOtherLoops = true) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    if (Rewriter.hasSeenLoopVariantSCEVUnknown())
      return SE.getCouldNotCompute();
    return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};

/// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is L,
/// use its post-increment expression; AddRecs on other loops are kept as-is.
/// The rewrite cannot be done if S contains a SCEVUnknown that is not
/// invariant in L.
class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
    SCEVPostIncRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.hasSeenLoopVariantSCEVUnknown()
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
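    // e.g. (illustrative) {0,+,1}<L> rewrites to its post-increment form
    // {1,+,1}<L>.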
4347 if (Expr->getLoop() == L) 4348 return Expr->getPostIncExpr(SE); 4349 SeenOtherLoops = true; 4350 return Expr; 4351 } 4352 4353 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4354 4355 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4356 4357 private: 4358 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) 4359 : SCEVRewriteVisitor(SE), L(L) {} 4360 4361 const Loop *L; 4362 bool SeenLoopVariantSCEVUnknown = false; 4363 bool SeenOtherLoops = false; 4364 }; 4365 4366 /// This class evaluates the compare condition by matching it against the 4367 /// condition of loop latch. If there is a match we assume a true value 4368 /// for the condition while building SCEV nodes. 4369 class SCEVBackedgeConditionFolder 4370 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { 4371 public: 4372 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4373 ScalarEvolution &SE) { 4374 bool IsPosBECond = false; 4375 Value *BECond = nullptr; 4376 if (BasicBlock *Latch = L->getLoopLatch()) { 4377 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); 4378 if (BI && BI->isConditional()) { 4379 assert(BI->getSuccessor(0) != BI->getSuccessor(1) && 4380 "Both outgoing branches should not target same header!"); 4381 BECond = BI->getCondition(); 4382 IsPosBECond = BI->getSuccessor(0) == L->getHeader(); 4383 } else { 4384 return S; 4385 } 4386 } 4387 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); 4388 return Rewriter.visit(S); 4389 } 4390 4391 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4392 const SCEV *Result = Expr; 4393 bool InvariantF = SE.isLoopInvariant(Expr, L); 4394 4395 if (!InvariantF) { 4396 Instruction *I = cast<Instruction>(Expr->getValue()); 4397 switch (I->getOpcode()) { 4398 case Instruction::Select: { 4399 SelectInst *SI = cast<SelectInst>(I); 4400 Optional<const SCEV *> Res = 4401 compareWithBackedgeCondition(SI->getCondition()); 4402 if (Res.hasValue()) { 4403 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne(); 4404 Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue()); 4405 } 4406 break; 4407 } 4408 default: { 4409 Optional<const SCEV *> Res = compareWithBackedgeCondition(I); 4410 if (Res.hasValue()) 4411 Result = Res.getValue(); 4412 break; 4413 } 4414 } 4415 } 4416 return Result; 4417 } 4418 4419 private: 4420 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, 4421 bool IsPosBECond, ScalarEvolution &SE) 4422 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), 4423 IsPositiveBECond(IsPosBECond) {} 4424 4425 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC); 4426 4427 const Loop *L; 4428 /// Loop back condition. 4429 Value *BackedgeCond = nullptr; 4430 /// Set to true if loop back is on positive branch condition. 4431 bool IsPositiveBECond; 4432 }; 4433 4434 Optional<const SCEV *> 4435 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { 4436 4437 // If value matches the backedge condition for loop latch, 4438 // then return a constant evolution node based on loopback 4439 // branch taken. 4440 if (BackedgeCond == IC) 4441 return IsPositiveBECond ? 
SE.getOne(Type::getInt1Ty(SE.getContext())) 4442 : SE.getZero(Type::getInt1Ty(SE.getContext())); 4443 return None; 4444 } 4445 4446 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> { 4447 public: 4448 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4449 ScalarEvolution &SE) { 4450 SCEVShiftRewriter Rewriter(L, SE); 4451 const SCEV *Result = Rewriter.visit(S); 4452 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 4453 } 4454 4455 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4456 // Only allow AddRecExprs for this loop. 4457 if (!SE.isLoopInvariant(Expr, L)) 4458 Valid = false; 4459 return Expr; 4460 } 4461 4462 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4463 if (Expr->getLoop() == L && Expr->isAffine()) 4464 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); 4465 Valid = false; 4466 return Expr; 4467 } 4468 4469 bool isValid() { return Valid; } 4470 4471 private: 4472 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) 4473 : SCEVRewriteVisitor(SE), L(L) {} 4474 4475 const Loop *L; 4476 bool Valid = true; 4477 }; 4478 4479 } // end anonymous namespace 4480 4481 SCEV::NoWrapFlags 4482 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { 4483 if (!AR->isAffine()) 4484 return SCEV::FlagAnyWrap; 4485 4486 using OBO = OverflowingBinaryOperator; 4487 4488 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; 4489 4490 if (!AR->hasNoSignedWrap()) { 4491 ConstantRange AddRecRange = getSignedRange(AR); 4492 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); 4493 4494 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4495 Instruction::Add, IncRange, OBO::NoSignedWrap); 4496 if (NSWRegion.contains(AddRecRange)) 4497 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); 4498 } 4499 4500 if (!AR->hasNoUnsignedWrap()) { 4501 ConstantRange AddRecRange = getUnsignedRange(AR); 4502 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this)); 4503 4504 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4505 Instruction::Add, IncRange, OBO::NoUnsignedWrap); 4506 if (NUWRegion.contains(AddRecRange)) 4507 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW); 4508 } 4509 4510 return Result; 4511 } 4512 4513 namespace { 4514 4515 /// Represents an abstract binary operation. This may exist as a 4516 /// normal instruction or constant expression, or may have been 4517 /// derived from an expression tree. 4518 struct BinaryOp { 4519 unsigned Opcode; 4520 Value *LHS; 4521 Value *RHS; 4522 bool IsNSW = false; 4523 bool IsNUW = false; 4524 4525 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or 4526 /// constant expression. 4527 Operator *Op = nullptr; 4528 4529 explicit BinaryOp(Operator *Op) 4530 : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)), 4531 Op(Op) { 4532 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) { 4533 IsNSW = OBO->hasNoSignedWrap(); 4534 IsNUW = OBO->hasNoUnsignedWrap(); 4535 } 4536 } 4537 4538 explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false, 4539 bool IsNUW = false) 4540 : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {} 4541 }; 4542 4543 } // end anonymous namespace 4544 4545 /// Try to map \p V into a BinaryOp, and return \c None on failure. 
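/// For instance (illustrative): "%r = xor i32 %x, -2147483648" maps to
/// BinaryOp(Instruction::Add, %x, -2147483648) below, because xor with the
/// sign mask is really an add.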
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction
      // step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
    if (!WO)
      break;

    Instruction::BinaryOps BinOp = WO->getBinaryOp();
    bool Signed = WO->isSigned();
    // TODO: Should add nuw/nsw flags for mul as well.
    if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
      return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());

    // Now that we know that all uses of the arithmetic-result component of
    // CI are guarded by the overflow check, we can go ahead and pretend
    // that the arithmetic is non-overflowing.
    return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
                    /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
  }

  default:
    break;
  }

  // Recognise the intrinsic loop.decrement.reg; as this has exactly the same
  // semantics as a Sub, return a binary sub expression.
  if (auto *II = dyn_cast<IntrinsicInst>(V))
    if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
      return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));

  return None;
}

/// Helper function to createAddRecFromPHIWithCasts. We have a phi
/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
/// way.
/// This function checks if \p Op, an operand of this SCEVAddExpr,
/// follows one of the following patterns:
/// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// If the SCEV expression of \p Op conforms to one of the expected patterns
/// we return the type of the truncation operation, and indicate whether the
/// truncated type should be treated as signed/unsigned by setting
/// \p Signed to true/false, respectively.
static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
                               bool &Signed, ScalarEvolution &SE) {
  // The case where Op == SymbolicPHI (that is, with no type conversions on
  // the way) is handled by the regular add recurrence creating logic and
  // would have already been triggered in createAddRecForPHI. Reaching it here
  // means that createAddRecFromPHI had failed for this PHI before (e.g.,
  // because one of the other operands of the SCEVAddExpr updating this PHI is
  // not invariant).
  //
  // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
  // this case predicates that allow us to prove that Op == SymbolicPHI will
  // be added.
  if (Op == SymbolicPHI)
    return nullptr;

  unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
  unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
  if (SourceBits != NewBits)
    return nullptr;

  const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
  const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
  if (!SExt && !ZExt)
    return nullptr;
  const SCEVTruncateExpr *Trunc =
      SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
           : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
  if (!Trunc)
    return nullptr;
  const SCEV *X = Trunc->getOperand();
  if (X != SymbolicPHI)
    return nullptr;
  Signed = SExt != nullptr;
  return Trunc->getType();
}

static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
  if (!PN->getType()->isIntegerTy())
    return nullptr;
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;
  return L;
}

// Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
// computation that updates the phi follows the following pattern:
//   (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
// which corresponds to a phi->trunc->sext/zext->add->phi update chain.
// If so, try to see if it can be rewritten as an AddRecExpr under some
// Predicates. If successful, return them as a pair. Also cache the results
// of the analysis.
//
// Example usage scenario:
//    Say the Rewriter is called for the following SCEV:
//         8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    where:
//         %X = phi i64 (%Start, %BEValue)
//    It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
//    and call this function with %SymbolicPHI = %X.
//
//    The analysis will find that the value coming around the backedge has
//    the following SCEV:
//         BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    Upon concluding that this matches the desired pattern, the function
//    will return the pair {NewAddRec, SmallPredsVec} where:
//         NewAddRec = {%Start,+,%Step}
//         SmallPredsVec = {P1, P2, P3} as follows:
//           P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
//           P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
//           P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
//    The returned pair means that SymbolicPHI can be rewritten into NewAddRec
//    under the predicates {P1,P2,P3}.
//    This predicated rewrite will be cached in PredicatedSCEVRewrites:
//         PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
//
// TODOs:
//
// 1) Extend the Induction descriptor to also support inductions that involve
//    casts: When needed (namely, when we are called in the context of the
//    vectorizer induction analysis), a Set of cast instructions will be
//    populated by this method, and provided back to isInductionPHI. This is
//    needed to allow the vectorizer to properly record them to be ignored by
//    the cost model and to avoid vectorizing them (otherwise these casts,
//    which are redundant under the runtime overflow checks, will be
//    vectorized, which can be costly).
//
// 2) Support additional induction/PHISCEV patterns: We also want to support
//    inductions where the sext-trunc / zext-trunc operations (partly) occur
//    after the induction update operation (the induction increment):
//
//      (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
//    which corresponds to a phi->add->trunc->sext/zext->phi update chain.
//
//      (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
//    which corresponds to a phi->trunc->add->sext/zext->phi update chain.
//
// 3) Outline common code with createAddRecFromPHI to avoid duplication.
Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
  SmallVector<const SCEVPredicate *, 3> Predicates;

  // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
  // return an AddRec expression under some predicate.

  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  assert(L && "Expecting an integer loop header phi");

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
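  // For illustration, in a canonical loop such as:
  //     header:
  //       %iv = phi i64 [ %start, %preheader ], [ %iv.next, %latch ]
  // %start is the unique entry value and %iv.next is the unique backedge
  // value (the block and value names here are illustrative only).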
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return None;

  const SCEV *BEValue = getSCEV(BEValueV);

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, possibly with casts that we can ignore under
  // an appropriate runtime guard, then we found a simple induction variable!
  const auto *Add = dyn_cast<SCEVAddExpr>(BEValue);
  if (!Add)
    return None;

  // If there is a single occurrence of the symbolic value, possibly
  // casted, replace it with a recurrence.
  unsigned FoundIndex = Add->getNumOperands();
  Type *TruncTy = nullptr;
  bool Signed;
  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
    if ((TruncTy =
             isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this)))
      if (FoundIndex == e) {
        FoundIndex = i;
        break;
      }

  if (FoundIndex == Add->getNumOperands())
    return None;

  // Create an add with everything but the specified operand.
  SmallVector<const SCEV *, 8> Ops;
  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
    if (i != FoundIndex)
      Ops.push_back(Add->getOperand(i));
  const SCEV *Accum = getAddExpr(Ops);

  // The runtime checks will not be valid if the step amount is
  // varying inside the loop.
  if (!isLoopInvariant(Accum, L))
    return None;

  // *** Part2: Create the predicates

  // Analysis was successful: we have a phi-with-cast pattern for which we
  // can return an AddRec expression under the following predicates:
  //
  // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
  //     fits within the truncated type (does not overflow) for i = 0 to n-1.
  // P2: An Equal predicate that guarantees that
  //     Start = (Ext ix (Trunc iy (Start) to ix) to iy)
  // P3: An Equal predicate that guarantees that
  //     Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
  //
  // As we next prove, the above predicates guarantee that:
  //     Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
  //
  // More formally, we want to prove that:
  //     Expr(i+1) = Start + (i+1) * Accum
  //               = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // Given that:
  // 1) Expr(0) = Start
  // 2) Expr(1) = Start + Accum
  //            = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
  // 3) Induction hypothesis (step i):
  //    Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
  //
  // Proof:
  //  Expr(i+1) =
  //   = Start + (i+1)*Accum
  //   = (Start + i*Accum) + Accum
  //   = Expr(i) + Accum
  //   = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
  //                                                           :: from step i
  //
  //  = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum
  //
  //  = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
  //    + (Ext ix (Trunc iy (Accum) to ix) to iy)
  //    + Accum                                                :: from P3
  //
  //  = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
  //    + Accum                            :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
  //
  //  = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
  //  = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // By induction, the same applies to all iterations 1<=i<n.

  // Create a truncated addrec for which we will add a no overflow check (P1).
  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV =
      getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
                    getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);

  // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr.
  // E.g., if truncated Accum is 0 and StartVal is a constant, then PHISCEV
  // will be constant.
  //
  // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't
  // add P1.
  if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
        Signed ? SCEVWrapPredicate::IncrementNSSW
               : SCEVWrapPredicate::IncrementNUSW;
    const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
    Predicates.push_back(AddRecPred);
  }

  // Create the Equal Predicates P2,P3:

  // It is possible that the predicates P2 and/or P3 are computable at
  // compile time due to StartVal and/or Accum being constants.
  // If either one is, then we can check that now and escape if either P2
  // or P3 is false.

  // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
  // for each of StartVal and Accum.
  auto getExtendedExpr = [&](const SCEV *Expr,
                             bool CreateSignExtend) -> const SCEV * {
    assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
    const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
    const SCEV *ExtendedExpr =
        CreateSignExtend ?
            getSignExtendExpr(TruncatedExpr, Expr->getType())
                         : getZeroExtendExpr(TruncatedExpr, Expr->getType());
    return ExtendedExpr;
  };

  // Given:
  //   ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
  //                = getExtendedExpr(Expr)
  // Determine whether the predicate P: Expr == ExtendedExpr
  // is known to be false at compile time.
  auto PredIsKnownFalse = [&](const SCEV *Expr,
                              const SCEV *ExtendedExpr) -> bool {
    return Expr != ExtendedExpr &&
           isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
  };

  const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
  if (PredIsKnownFalse(StartVal, StartExtended)) {
    LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
    return None;
  }

  // The Step is always Signed (because the overflow checks are either
  // NSSW or NUSW).
  const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
  if (PredIsKnownFalse(Accum, AccumExtended)) {
    LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
    return None;
  }

  auto AppendPredicate = [&](const SCEV *Expr,
                             const SCEV *ExtendedExpr) -> void {
    if (Expr != ExtendedExpr &&
        !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
      const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
      LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
      Predicates.push_back(Pred);
    }
  };

  AppendPredicate(StartVal, StartExtended);
  AppendPredicate(Accum, AccumExtended);

  // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
  // which the casts had been folded away. The caller can rewrite SymbolicPHI
  // into NewAR if it will also add the runtime overflow checks specified in
  // Predicates.
  auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);

  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
      std::make_pair(NewAR, Predicates);
  // Remember the result of the analysis for this SCEV at this location.
  PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
  return PredRewrite;
}

Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  if (!L)
    return None;

  // Check to see if we already analyzed this PHI.
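  // (Illustrative note on the cache layout: a prior failure is recorded as
  // {SymbolicPHI, {}} and a prior success as {AddRec, {Predicates}}, so the
  // lookup below can tell the two apart by checking whether the cached SCEV
  // is the phi itself.)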
  auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
  if (I != PredicatedSCEVRewrites.end()) {
    std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
        I->second;
    // Analysis was done before and failed to create an AddRec:
    if (Rewrite.first == SymbolicPHI)
      return None;
    // Analysis was done before and succeeded in creating an AddRec under
    // a predicate:
    assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
    assert(!(Rewrite.second).empty() && "Expected to find Predicates");
    return Rewrite;
  }

  Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
      Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);

  // Record in the cache that the analysis failed.
  if (!Rewrite) {
    SmallVector<const SCEVPredicate *, 3> Predicates;
    PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
    return None;
  }

  return Rewrite;
}

// FIXME: This utility is currently required because the Rewriter currently
// does not rewrite this expression:
// {0, +, (sext ix (trunc iy to ix) to iy)}
// into {0, +, %step},
// even when the following Equal predicate exists:
// "%step == (sext ix (trunc iy to ix) to iy)".
bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
    const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
  if (AR1 == AR2)
    return true;

  auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
    if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
        !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
      return false;
    return true;
  };

  if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
      !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
    return false;
  return true;
}

/// A helper function for createAddRecFromPHI to handle simple cases.
///
/// This function tries to find an AddRec expression for the simplest (yet most
/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
/// If it fails, createAddRecFromPHI will use a more general, but slow,
/// technique for finding the AddRec expression.
const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
                                                      Value *BEValueV,
                                                      Value *StartValueV) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  assert(L && L->getHeader() == PN->getParent());
  assert(BEValueV && StartValueV);

  auto BO = MatchBinaryOp(BEValueV, DT);
  if (!BO)
    return nullptr;

  if (BO->Opcode != Instruction::Add)
    return nullptr;

  const SCEV *Accum = nullptr;
  if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
    Accum = getSCEV(BO->RHS);
  else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
    Accum = getSCEV(BO->LHS);

  if (!Accum)
    return nullptr;

  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BO->IsNUW)
    Flags = setFlags(Flags, SCEV::FlagNUW);
  if (BO->IsNSW)
    Flags = setFlags(Flags, SCEV::FlagNSW);

  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

  ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

  // We can add Flags to the post-inc expression only if we
  // know that it is *undefined behavior* for BEValueV to
  // overflow.
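  // (Illustrative) For %iv = {%Start,+,%Step}<nuw> whose increment is an
  // 'add nuw' instruction, the call below pre-computes and caches the
  // post-increment recurrence {(%Start + %Step),+,%Step} with the same
  // no-wrap flags.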
  if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
    if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
      (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

  return PHISCEV;
}

const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return nullptr;

  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
         "PHI node already processed?");

  // First, try to find an AddRec expression without creating a fictitious
  // symbolic value for PN.
  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
    return S;

  // Handle PHI node value symbolically.
  const SCEV *SymbolicName = getUnknown(PN);
  ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});

  // Using this symbolic name for the PHI, analyze the value coming around
  // the back-edge.
  const SCEV *BEValue = getSCEV(BEValueV);

  // NOTE: If BEValue is loop invariant, we know that the PHI node just
  // has a special value for the first iteration of the loop.

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, then we found a simple induction variable!
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
    // If there is a single occurrence of the symbolic value, replace it
    // with a recurrence.
    unsigned FoundIndex = Add->getNumOperands();
    for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
      if (Add->getOperand(i) == SymbolicName)
        if (FoundIndex == e) {
          FoundIndex = i;
          break;
        }

    if (FoundIndex != Add->getNumOperands()) {
      // Create an add with everything but the specified operand.
      SmallVector<const SCEV *, 8> Ops;
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (i != FoundIndex)
          Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
                                                             L, *this));
      const SCEV *Accum = getAddExpr(Ops);

      // This is not a valid addrec if the step amount is varying each
      // loop iteration, but is not itself an addrec in this loop.
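      // (Illustrative) A step that is itself an addrec of this loop is fine:
      // if Accum is {1,+,1}<L>, the recurrence created below folds into the
      // quadratic {Start,+,1,+,1}<L>. A step that varies in some other way,
      // e.g. a value loaded from memory inside the loop, is rejected.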
      if (isLoopInvariant(Accum, L) ||
          (isa<SCEVAddRecExpr>(Accum) &&
           cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
        SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

        if (auto BO = MatchBinaryOp(BEValueV, DT)) {
          if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
            if (BO->IsNUW)
              Flags = setFlags(Flags, SCEV::FlagNUW);
            if (BO->IsNSW)
              Flags = setFlags(Flags, SCEV::FlagNSW);
          }
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
          // If the increment is an inbounds GEP, then we know the address
          // space cannot be wrapped around. We cannot make any guarantee
          // about signed or unsigned overflow because pointers are
          // unsigned but we may have a negative index from the base
          // pointer. We can guarantee that no unsigned wrap occurs if the
          // indices form a positive value.
          if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
            Flags = setFlags(Flags, SCEV::FlagNW);

            const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
            if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
              Flags = setFlags(Flags, SCEV::FlagNUW);
          }

          // We cannot transfer nuw and nsw flags from subtraction
          // operations -- sub nuw X, Y is not the same as add nuw X, -Y
          // for instance.
        }

        const SCEV *StartVal = getSCEV(StartValueV);
        const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

        // We can add Flags to the post-inc expression only if we
        // know that it is *undefined behavior* for BEValueV to
        // overflow.
        if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
          if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

        return PHISCEV;
      }
    }
  } else {
    // Otherwise, this could be a loop like this:
    //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
    // In this case, j = {1,+,1} and BEValue is j.
    // Because the other in-value of i (0) fits the evolution of BEValue,
    // i really is an addrec evolution.
    //
    // We can generalize this by saying that i is the shifted value of BEValue
    // by one iteration:
    //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
    if (Shifted != getCouldNotCompute() &&
        Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
  // as it would prevent later (possibly simpler) SCEV expressions from being
  // added to the ValueExprMap.
  eraseValueFromMap(PN);

  return nullptr;
}

// Checks if the SCEV S is available at BB. S is considered available at BB
// if S can be materialized at BB without introducing a fault.
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr; // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
        : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
      case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
      case scUMinExpr:
      case scSMinExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences that are on the loop that BB is in, or on
        // some outer loop. This guarantees availability because the value of
        // the add recurrence at BB is simply the "current" value of the
        // induction variable. We can relax this in the future; for instance
        // an add recurrence on a sibling dominating loop is also available
        // at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
        return setUnavailable();
      }
      llvm_unreachable("switch should be fully covered!");
    }

    bool isDone() { return TraversalDone; }
  };

  CheckAvailable CA(L, BB, DT);
  SCEVTraversal<CheckAvailable> ST(CA);

  ST.visitAll(S);
  return CA.Available;
}

// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
// match.
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
                          Value *&C, Value *&LHS, Value *&RHS) {
  C = BI->getCondition();

  BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
  BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));

  if (!LeftEdge.isSingleEdge())
    return false;

  assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");

  Use &LeftUse = Merge->getOperandUse(0);
  Use &RightUse = Merge->getOperandUse(1);

  if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
    LHS = LeftUse;
    RHS = RightUse;
    return true;
  }

  if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
    LHS = RightUse;
    RHS = LeftUse;
    return true;
  }

  return false;
}

const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
  auto IsReachable =
      [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
  if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
    const Loop *L = LI.getLoopFor(PN->getParent());

    // We don't want to break LCSSA, even in a SCEV expression tree.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (LI.getLoopFor(PN->getIncomingBlock(i)) != L)
        return nullptr;

    // Try to match
    //
    //  br %cond, label %left, label %right
    // left:
    //  br label %merge
    // right:
    //  br label %merge
    // merge:
    //  V = phi [ %x, %left ], [ %y, %right ]
    //
    // as "select %cond, %x, %y"

    BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
    assert(IDom && "At least the entry block should dominate PN");

    auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
    Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;

    if (BI && BI->isConditional() &&
        BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
        IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) &&
        IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent()))
      return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
  }

  return nullptr;
}

const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (const SCEV *S = createAddRecFromPHI(PN))
    return S;

  if (const SCEV *S = createNodeFromSelectLikePHI(PN))
    return S;

  // If the PHI has a single incoming value, follow that value, unless the
  // PHI's incoming blocks are in a different loop, in which case doing so
  // risks breaking LCSSA form. Instcombine would normally zap these, but
  // it doesn't have DominatorTree information, so it may miss cases.
  if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
    if (LI.replacementPreservesLCSSAForm(PN, V))
      return getSCEV(V);

  // If it's not a loop phi, we can't handle it yet.
  return getUnknown(PN);
}

const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I,
                                                      Value *Cond,
                                                      Value *TrueVal,
                                                      Value *FalseVal) {
  // Handle "constant" branch or select. This can occur for instance when a
  // loop pass transforms an inner loop and moves on to process the outer loop.
  if (auto *CI = dyn_cast<ConstantInt>(Cond))
    return getSCEV(CI->isOne() ? TrueVal : FalseVal);

  // Try to match some simple smax or umax patterns.
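  // (Illustrative IR) %s = select i1 %cmp, i32 %a, i32 %b, where
  // %cmp = icmp sgt i32 %a, %b, is recognized below as smax(%a, %b)
  // (the x offset being zero in this instance).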
  auto *ICI = dyn_cast<ICmpInst>(Cond);
  if (!ICI)
    return getUnknown(I);

  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  switch (ICI->getPredicate()) {
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    // a >s b ? a+x : b+x  ->  smax(a, b)+x
    // a >s b ? b+x : a+x  ->  smin(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getSMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getSMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    // a >u b ? a+x : b+x  ->  umax(a, b)+x
    // a >u b ? b+x : a+x  ->  umin(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_NE:
    // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, One);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  case ICmpInst::ICMP_EQ:
    // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, One);
      const SCEV *RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  default:
    break;
  }

  return getUnknown(I);
}

/// Expand GEP instructions into add and multiply operations. This allows them
/// to be analyzed by regular SCEV code.
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  // Don't attempt to analyze GEPs over unsized objects.
  if (!GEP->getSourceElementType()->isSized())
    return getUnknown(GEP);

  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(getSCEV(*Index));
  return getGEPExpr(GEP, IndexExprs);
}

uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return C->getAPInt().countTrailingZeros();

  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
    return std::min(GetMinTrailingZeros(T->getOperand()),
                    (uint32_t)getTypeSizeInBits(T->getType()));

  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // The result is the sum of all operands' results.
    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
    uint32_t BitWidth = getTypeSizeInBits(M->getType());
    for (unsigned i = 1, e = M->getNumOperands();
         SumOpRes != BitWidth && i != e; ++i)
      SumOpRes =
          std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
    return SumOpRes;
  }

  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    KnownBits Known =
        computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
    return Known.countMinTrailingZeros();
  }

  // SCEVUDivExpr
  return 0;
}

uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  auto I = MinTrailingZerosCache.find(S);
  if (I != MinTrailingZerosCache.end())
    return I->second;

  uint32_t Result = GetMinTrailingZerosImpl(S);
  auto InsertPair = MinTrailingZerosCache.insert({S, Result});
  assert(InsertPair.second && "Should insert a new key");
  return InsertPair.first->second;
}

/// Helper method to assign a range to V from metadata present in the IR.
static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
      return getConstantRangeFromMetadata(*MD);

  return None;
}

/// Determine the range for a particular SCEV. If SignHint is
/// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
/// with a "cleaner" unsigned (resp. signed) representation.
const ConstantRange &
ScalarEvolution::getRangeRef(const SCEV *S,
                             ScalarEvolution::RangeSignHint SignHint) {
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;
  ConstantRange::PreferredRangeType RangeType =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED
          ? ConstantRange::Unsigned : ConstantRange::Signed;

  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
  if (I != Cache.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setRange(C, SignHint, ConstantRange(C->getAPInt()));

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
  using OBO = OverflowingBinaryOperator;

  // If the value has known zeros, the maximum value will have those known
  // zeros as well.
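  // (Worked example, illustrative) For an i8 value with at least TZ = 2
  // trailing zero bits, the unsigned clamp below is [0, 0xFC + 1), since
  // 0xFF.lshr(2).shl(2) == 0xFC is the largest i8 multiple of 4.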
  uint32_t TZ = GetMinTrailingZeros(S);
  if (TZ != 0) {
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
      ConservativeResult =
          ConstantRange(APInt::getMinValue(BitWidth),
                        APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
    else
      ConservativeResult = ConstantRange(
          APInt::getSignedMinValue(BitWidth),
          APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
  }

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getRangeRef(Add->getOperand(0), SignHint);
    unsigned WrapType = OBO::AnyWrap;
    if (Add->hasNoSignedWrap())
      WrapType |= OBO::NoSignedWrap;
    if (Add->hasNoUnsignedWrap())
      WrapType |= OBO::NoUnsignedWrap;
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint),
                          WrapType, RangeType);
    return setRange(Add, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint);
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint));
    return setRange(Mul, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getRangeRef(SMax->getOperand(i), SignHint));
    return setRange(SMax, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getRangeRef(UMax->getOperand(i), SignHint));
    return setRange(UMax, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) {
    ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint);
    for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i)
      X = X.smin(getRangeRef(SMin->getOperand(i), SignHint));
    return setRange(SMin, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) {
    ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint);
    for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i)
      X = X.umin(getRangeRef(UMin->getOperand(i), SignHint));
    return setRange(UMin, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint);
    ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint);
    return setRange(UDiv, SignHint,
                    ConservativeResult.intersectWith(X.udiv(Y), RangeType));
  }

  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint);
    return setRange(ZExt, SignHint,
                    ConservativeResult.intersectWith(X.zeroExtend(BitWidth),
                                                     RangeType));
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getRangeRef(SExt->getOperand(), SignHint);
    return setRange(SExt,
                    SignHint,
                    ConservativeResult.intersectWith(X.signExtend(BitWidth),
                                                     RangeType));
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
    return setRange(Trunc, SignHint,
                    ConservativeResult.intersectWith(X.truncate(BitWidth),
                                                     RangeType));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->hasNoUnsignedWrap()) {
      APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
      if (!UnsignedMinValue.isNullValue())
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
    }

    // If there's no signed wrap, and all the operands except the initial
    // value have the same sign or are zero, the value won't ever be:
    // 1: smaller than the initial value if the operands are non-negative,
    // 2: bigger than the initial value if the operands are non-positive.
    // In both cases, the value cannot cross the signed min/max boundary.
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i)))
          AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i)))
          AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()),
                                       APInt::getSignedMinValue(BitWidth)),
            RangeType);
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(
                APInt::getSignedMinValue(BitWidth),
                getSignedRangeMax(AddRec->getStart()) + 1),
            RangeType);
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const SCEV *MaxBECount =
          getConstantMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        auto RangeFromAffine = getRangeForAffineAR(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        if (!RangeFromAffine.isFullSet())
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffine, RangeType);

        auto RangeFromFactoring = getRangeViaFactoring(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        if (!RangeFromFactoring.isFullSet())
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
      }
    }

    return setRange(AddRec, SignHint, std::move(ConservativeResult));
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // Check if the IR explicitly contains !range metadata.
    Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
    if (MDRange.hasValue())
      ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
                                                            RangeType);

    // Split here to avoid paying the compile-time cost of calling both
    // computeKnownBits and ComputeNumSignBits. This restriction can be lifted
    // if needed.
    const DataLayout &DL = getDataLayout();
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
      // For a SCEVUnknown, ask ValueTracking.
      KnownBits Known =
          computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
      if (Known.getBitWidth() != BitWidth)
        Known = Known.zextOrTrunc(BitWidth);
      // If Known does not result in full-set, intersect with it.
      if (Known.getMinValue() != Known.getMaxValue() + 1)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
            RangeType);
    } else {
      assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
             "generalize as needed!");
      unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
      // If the pointer size is larger than the index size type, this can cause
      // NS to be larger than BitWidth. So compensate for this.
      if (U->getType()->isPointerTy()) {
        unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
        int ptrIdxDiff = ptrSize - BitWidth;
        if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
          NS -= ptrIdxDiff;
      }

      if (NS > 1)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                          APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
            RangeType);
    }

    // The range of a Phi is a subset of the union of the ranges of its inputs.
    if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
      // Make sure that we do not run over cycled Phis.
      if (PendingPhiRanges.insert(Phi).second) {
        ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
        for (auto &Op : Phi->operands()) {
          auto OpRange = getRangeRef(getSCEV(Op), SignHint);
          RangeFromOps = RangeFromOps.unionWith(OpRange);
          // No point in continuing if we already have a full set.
          if (RangeFromOps.isFullSet())
            break;
        }
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromOps, RangeType);
        bool Erased = PendingPhiRanges.erase(Phi);
        assert(Erased && "Failed to erase Phi properly?");
        (void) Erased;
      }
    }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}

// Given a StartRange, Step and MaxBECount for an expression, compute a range
// of values that the expression can take. Initially, the expression has a
// value from StartRange and then is changed by Step up to MaxBECount times.
// The Signed argument defines whether we treat Step as signed or unsigned.
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               unsigned BitWidth, bool Signed) {
  // If either Step or MaxBECount is 0, then the expression won't change, and
  // we just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange::getFull(BitWidth);

  // If Step is signed and negative, then we use its absolute value, but we
  // also note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // These equations hold due to the well-defined wrap-around behavior of
    // APInt.
    Step = Step.abs();

  // Check if the total change (i.e. Step * MaxBECount) is more than the full
  // span of BitWidth. If it is, the expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange::getFull(BitWidth);

  // Offset is by how much the expression can change. Checks above guarantee no
  // overflow here.
  APInt Offset = Step * MaxBECount;

  // The minimum value of the final range will match the minimal value of
  // StartRange if the expression is increasing and will be decreased by
  // Offset otherwise. The maximum value of the final range will match the
  // maximal value of StartRange if the expression is decreasing and will be
  // increased by Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the
  // initial range (due to wrap-around). This means that the expression can
  // take any value in this bitwidth, and we have to return the full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange::getFull(BitWidth);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
  return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
}

ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const SCEV *MaxBECount,
                                                   unsigned BitWidth) {
  assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
         getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
         "Precondition!");

  MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
  APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);

  // First, consider step signed.
  ConstantRange StartSRange = getSignedRange(Start);
  ConstantRange StepSRange = getSignedRange(Step);

  // If Step can be both positive and negative, we need to find ranges for the
  // maximum absolute step values in both directions and union them.
  ConstantRange SR =
      getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
                                MaxBECountValue, BitWidth, /* Signed = */ true);
  SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
                                              StartSRange, MaxBECountValue,
                                              BitWidth, /* Signed = */ true));

  // Next, consider step unsigned.
  ConstantRange UR = getRangeForAffineARHelper(
      getUnsignedRangeMax(Step), getUnsignedRange(Start),
      MaxBECountValue, BitWidth, /* Signed = */ false);

  // Finally, intersect signed and unsigned ranges.
  return SR.intersectWith(UR, ConstantRange::Smallest);
}

ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
                                                    const SCEV *Step,
                                                    const SCEV *MaxBECount,
                                                    unsigned BitWidth) {
  // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
  //                          == RangeOf({A,+,P}) union RangeOf({B,+,Q})

  struct SelectPattern {
    Value *Condition = nullptr;
    APInt TrueValue;
    APInt FalseValue;

    explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
                           const SCEV *S) {
      Optional<unsigned> CastOp;
      APInt Offset(BitWidth, 0);

      assert(SE.getTypeSizeInBits(S->getType()) == BitWidth &&
             "Should be!");

      // Peel off a constant offset:
      if (auto *SA = dyn_cast<SCEVAddExpr>(S)) {
        // In the future we could consider being smarter here and handle
        // {Start+Step,+,Step} too.
        if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
          return;

        Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
        S = SA->getOperand(1);
      }

      // Peel off a cast operation
      if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) {
        CastOp = SCast->getSCEVType();
        S = SCast->getOperand();
      }

      using namespace llvm::PatternMatch;

      auto *SU = dyn_cast<SCEVUnknown>(S);
      const APInt *TrueVal, *FalseVal;
      if (!SU ||
          !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal),
                                          m_APInt(FalseVal)))) {
        Condition = nullptr;
        return;
      }

      TrueValue = *TrueVal;
      FalseValue = *FalseVal;

      // Re-apply the cast we peeled off earlier
      if (CastOp.hasValue())
        switch (*CastOp) {
        default:
          llvm_unreachable("Unknown SCEV cast type!");

        case scTruncate:
          TrueValue = TrueValue.trunc(BitWidth);
          FalseValue = FalseValue.trunc(BitWidth);
          break;
        case scZeroExtend:
          TrueValue = TrueValue.zext(BitWidth);
          FalseValue = FalseValue.zext(BitWidth);
          break;
        case scSignExtend:
          TrueValue = TrueValue.sext(BitWidth);
          FalseValue = FalseValue.sext(BitWidth);
          break;
        }

      // Re-apply the constant offset we peeled off earlier
      TrueValue += Offset;
      FalseValue += Offset;
    }

    bool isRecognized() { return Condition != nullptr; }
  };

  SelectPattern StartPattern(*this, BitWidth, Start);
  if (!StartPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  SelectPattern StepPattern(*this, BitWidth, Step);
  if (!StepPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  if (StartPattern.Condition != StepPattern.Condition) {
    // We don't handle this case today; but we could, by considering four
    // possibilities below instead of two. I'm not sure if there are cases
    // where that will help over what getRange already does, though.
    return ConstantRange::getFull(BitWidth);
  }

  // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to
  // construct arbitrary general SCEV expressions here. This function is called
  // from deep in the call stack, and calling getSCEV (on a sext instruction,
  // say) can end up caching a suboptimal value.

  // FIXME: without the explicit `this` receiver below, MSVC errors out with
  // C2352 and C2512 (otherwise it isn't needed).
6034 6035 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 6036 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 6037 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 6038 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 6039 6040 ConstantRange TrueRange = 6041 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 6042 ConstantRange FalseRange = 6043 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 6044 6045 return TrueRange.unionWith(FalseRange); 6046 } 6047 6048 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 6049 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 6050 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 6051 6052 // Return early if there are no flags to propagate to the SCEV. 6053 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6054 if (BinOp->hasNoUnsignedWrap()) 6055 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 6056 if (BinOp->hasNoSignedWrap()) 6057 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 6058 if (Flags == SCEV::FlagAnyWrap) 6059 return SCEV::FlagAnyWrap; 6060 6061 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 6062 } 6063 6064 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 6065 // Here we check that I is in the header of the innermost loop containing I, 6066 // since we only deal with instructions in the loop header. The actual loop we 6067 // need to check later will come from an add recurrence, but getting that 6068 // requires computing the SCEV of the operands, which can be expensive. This 6069 // check we can do cheaply to rule out some cases early. 6070 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 6071 if (InnermostContainingLoop == nullptr || 6072 InnermostContainingLoop->getHeader() != I->getParent()) 6073 return false; 6074 6075 // Only proceed if we can prove that I does not yield poison. 6076 if (!programUndefinedIfPoison(I)) 6077 return false; 6078 6079 // At this point we know that if I is executed, then it does not wrap 6080 // according to at least one of NSW or NUW. If I is not executed, then we do 6081 // not know if the calculation that I represents would wrap. Multiple 6082 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 6083 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 6084 // derived from other instructions that map to the same SCEV. We cannot make 6085 // that guarantee for cases where I is not executed. So we need to find the 6086 // loop that I is considered in relation to and prove that I is executed for 6087 // every iteration of that loop. That implies that the value that I 6088 // calculates does not wrap anywhere in the loop, so then we can apply the 6089 // flags to the SCEV. 6090 // 6091 // We check isLoopInvariant to disambiguate in case we are adding recurrences 6092 // from different loops, so that we know which loop to prove that I is 6093 // executed in. 6094 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) { 6095 // I could be an extractvalue from a call to an overflow intrinsic. 6096 // TODO: We can do better here in some cases. 
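    // For example (illustrative), if I is "extractvalue {i32, i1} %agg, 0"
    // where %agg is produced by a call to @llvm.sadd.with.overflow.i32, the
    // struct-typed operand %agg is not SCEVable and we conservatively bail
    // out here.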
    if (!isSCEVable(I->getOperand(OpIndex)->getType()))
      return false;
    const SCEV *Op = getSCEV(I->getOperand(OpIndex));
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
      bool AllOtherOpsLoopInvariant = true;
      for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
           ++OtherOpIndex) {
        if (OtherOpIndex != OpIndex) {
          const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
          if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
            AllOtherOpsLoopInvariant = false;
            break;
          }
        }
      }
      if (AllOtherOpsLoopInvariant &&
          isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
        return true;
    }
  }
  return false;
}

bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
  // If we know that \c I can never be poison, period, then that's enough.
  if (isSCEVExprNeverPoison(I))
    return true;

  // For an add recurrence specifically, we assume that infinite loops without
  // side effects are undefined behavior, and then reason as follows:
  //
  // If the add recurrence is poison in any iteration, it is poison on all
  // future iterations (since incrementing poison yields poison). If the result
  // of the add recurrence is fed into the loop latch condition and the loop
  // does not contain any throws or exiting blocks other than the latch, we now
  // have the ability to "choose" whether the backedge is taken or not (by
  // choosing a sufficiently evil value for the poison feeding into the branch)
  // for every iteration including and after the one in which \p I first became
  // poison. There are two possibilities (let's call the iteration in which
  // \p I first became poison K):
  //
  // 1. In the set of iterations including and after K, the loop body executes
  //    no side effects. In this case executing the backedge an infinite number
  //    of times will yield undefined behavior.
  //
  // 2. In the set of iterations including and after K, the loop body executes
  //    at least one side effect. In this case, that specific instance of side
  //    effect is control dependent on poison, which also yields undefined
  //    behavior.

  auto *ExitingBB = L->getExitingBlock();
  auto *LatchBB = L->getLoopLatch();
  if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
    return false;

  SmallPtrSet<const Instruction *, 16> Pushed;
  SmallVector<const Instruction *, 8> PoisonStack;

  // We start by assuming \c I, the post-inc add recurrence, is poison. Only
  // things that are known to be poison under that assumption go on the
  // PoisonStack.
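  // For example (illustrative), if I (call it %iv) feeds
  //   %a = add i32 %iv, 1
  // and %a is the operand of the conditional branch in LatchBB, then poison
  // propagates from I through %a to the branch, and the walk below reports
  // that the latch is control dependent on poison.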
6158 Pushed.insert(I); 6159 PoisonStack.push_back(I); 6160 6161 bool LatchControlDependentOnPoison = false; 6162 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) { 6163 const Instruction *Poison = PoisonStack.pop_back_val(); 6164 6165 for (auto *PoisonUser : Poison->users()) { 6166 if (propagatesPoison(cast<Instruction>(PoisonUser))) { 6167 if (Pushed.insert(cast<Instruction>(PoisonUser)).second) 6168 PoisonStack.push_back(cast<Instruction>(PoisonUser)); 6169 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) { 6170 assert(BI->isConditional() && "Only possibility!"); 6171 if (BI->getParent() == LatchBB) { 6172 LatchControlDependentOnPoison = true; 6173 break; 6174 } 6175 } 6176 } 6177 } 6178 6179 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L); 6180 } 6181 6182 ScalarEvolution::LoopProperties 6183 ScalarEvolution::getLoopProperties(const Loop *L) { 6184 using LoopProperties = ScalarEvolution::LoopProperties; 6185 6186 auto Itr = LoopPropertiesCache.find(L); 6187 if (Itr == LoopPropertiesCache.end()) { 6188 auto HasSideEffects = [](Instruction *I) { 6189 if (auto *SI = dyn_cast<StoreInst>(I)) 6190 return !SI->isSimple(); 6191 6192 return I->mayHaveSideEffects(); 6193 }; 6194 6195 LoopProperties LP = {/* HasNoAbnormalExits */ true, 6196 /*HasNoSideEffects*/ true}; 6197 6198 for (auto *BB : L->getBlocks()) 6199 for (auto &I : *BB) { 6200 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 6201 LP.HasNoAbnormalExits = false; 6202 if (HasSideEffects(&I)) 6203 LP.HasNoSideEffects = false; 6204 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects) 6205 break; // We're already as pessimistic as we can get. 6206 } 6207 6208 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 6209 assert(InsertPair.second && "We just checked!"); 6210 Itr = InsertPair.first; 6211 } 6212 6213 return Itr->second; 6214 } 6215 6216 const SCEV *ScalarEvolution::createSCEV(Value *V) { 6217 if (!isSCEVable(V->getType())) 6218 return getUnknown(V); 6219 6220 if (Instruction *I = dyn_cast<Instruction>(V)) { 6221 // Don't attempt to analyze instructions in blocks that aren't 6222 // reachable. Such instructions don't matter, and they aren't required 6223 // to obey basic rules for definitions dominating uses which this 6224 // analysis depends on. 6225 if (!DT.isReachableFromEntry(I->getParent())) 6226 return getUnknown(UndefValue::get(V->getType())); 6227 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 6228 return getConstant(CI); 6229 else if (isa<ConstantPointerNull>(V)) 6230 return getZero(V->getType()); 6231 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 6232 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 6233 else if (!isa<ConstantExpr>(V)) 6234 return getUnknown(V); 6235 6236 Operator *U = cast<Operator>(V); 6237 if (auto BO = MatchBinaryOp(U, DT)) { 6238 switch (BO->Opcode) { 6239 case Instruction::Add: { 6240 // The simple thing to do would be to just call getSCEV on both operands 6241 // and call getAddExpr with the result. However if we're looking at a 6242 // bunch of things all added together, this can be quite inefficient, 6243 // because it leads to N-1 getAddExpr calls for N ultimate operands. 6244 // Instead, gather up all the operands and make a single getAddExpr call. 6245 // LLVM IR canonical form means we need only traverse the left operands. 
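      // For example (illustrative), for (((%a + %b) + %c) + %d) the loop
      // below pushes the SCEVs of %d, %c and %b while walking the left
      // operands, finally pushes %a, and then a single four-operand
      // getAddExpr call is made.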
6246 SmallVector<const SCEV *, 4> AddOps; 6247 do { 6248 if (BO->Op) { 6249 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6250 AddOps.push_back(OpSCEV); 6251 break; 6252 } 6253 6254 // If a NUW or NSW flag can be applied to the SCEV for this 6255 // addition, then compute the SCEV for this addition by itself 6256 // with a separate call to getAddExpr. We need to do that 6257 // instead of pushing the operands of the addition onto AddOps, 6258 // since the flags are only known to apply to this particular 6259 // addition - they may not apply to other additions that can be 6260 // formed with operands from AddOps. 6261 const SCEV *RHS = getSCEV(BO->RHS); 6262 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6263 if (Flags != SCEV::FlagAnyWrap) { 6264 const SCEV *LHS = getSCEV(BO->LHS); 6265 if (BO->Opcode == Instruction::Sub) 6266 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 6267 else 6268 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 6269 break; 6270 } 6271 } 6272 6273 if (BO->Opcode == Instruction::Sub) 6274 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 6275 else 6276 AddOps.push_back(getSCEV(BO->RHS)); 6277 6278 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6279 if (!NewBO || (NewBO->Opcode != Instruction::Add && 6280 NewBO->Opcode != Instruction::Sub)) { 6281 AddOps.push_back(getSCEV(BO->LHS)); 6282 break; 6283 } 6284 BO = NewBO; 6285 } while (true); 6286 6287 return getAddExpr(AddOps); 6288 } 6289 6290 case Instruction::Mul: { 6291 SmallVector<const SCEV *, 4> MulOps; 6292 do { 6293 if (BO->Op) { 6294 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6295 MulOps.push_back(OpSCEV); 6296 break; 6297 } 6298 6299 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6300 if (Flags != SCEV::FlagAnyWrap) { 6301 MulOps.push_back( 6302 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 6303 break; 6304 } 6305 } 6306 6307 MulOps.push_back(getSCEV(BO->RHS)); 6308 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6309 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 6310 MulOps.push_back(getSCEV(BO->LHS)); 6311 break; 6312 } 6313 BO = NewBO; 6314 } while (true); 6315 6316 return getMulExpr(MulOps); 6317 } 6318 case Instruction::UDiv: 6319 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6320 case Instruction::URem: 6321 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6322 case Instruction::Sub: { 6323 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6324 if (BO->Op) 6325 Flags = getNoWrapFlagsFromUB(BO->Op); 6326 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 6327 } 6328 case Instruction::And: 6329 // For an expression like x&255 that merely masks off the high bits, 6330 // use zext(trunc(x)) as the SCEV expression. 6331 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6332 if (CI->isZero()) 6333 return getSCEV(BO->RHS); 6334 if (CI->isMinusOne()) 6335 return getSCEV(BO->LHS); 6336 const APInt &A = CI->getValue(); 6337 6338 // Instcombine's ShrinkDemandedConstant may strip bits out of 6339 // constants, obscuring what would otherwise be a low-bits mask. 6340 // Use computeKnownBits to compute what ShrinkDemandedConstant 6341 // knew about to reconstruct a low-bits mask value. 
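        // For example (illustrative), for "and i8 %x, 60" (60 == 0b00111100)
        // we get LZ = 2 and TZ = 2, and the expression is modeled as
        // (zext (trunc (%x /u 4) to i4) to i8) * 4, which zeroes exactly the
        // two high and two low bits.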
6342 unsigned LZ = A.countLeadingZeros(); 6343 unsigned TZ = A.countTrailingZeros(); 6344 unsigned BitWidth = A.getBitWidth(); 6345 KnownBits Known(BitWidth); 6346 computeKnownBits(BO->LHS, Known, getDataLayout(), 6347 0, &AC, nullptr, &DT); 6348 6349 APInt EffectiveMask = 6350 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 6351 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 6352 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 6353 const SCEV *LHS = getSCEV(BO->LHS); 6354 const SCEV *ShiftedLHS = nullptr; 6355 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 6356 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 6357 // For an expression like (x * 8) & 8, simplify the multiply. 6358 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 6359 unsigned GCD = std::min(MulZeros, TZ); 6360 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 6361 SmallVector<const SCEV*, 4> MulOps; 6362 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 6363 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 6364 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 6365 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 6366 } 6367 } 6368 if (!ShiftedLHS) 6369 ShiftedLHS = getUDivExpr(LHS, MulCount); 6370 return getMulExpr( 6371 getZeroExtendExpr( 6372 getTruncateExpr(ShiftedLHS, 6373 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 6374 BO->LHS->getType()), 6375 MulCount); 6376 } 6377 } 6378 break; 6379 6380 case Instruction::Or: 6381 // If the RHS of the Or is a constant, we may have something like: 6382 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 6383 // optimizations will transparently handle this case. 6384 // 6385 // In order for this transformation to be safe, the LHS must be of the 6386 // form X*(2^n) and the Or constant must be less than 2^n. 6387 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6388 const SCEV *LHS = getSCEV(BO->LHS); 6389 const APInt &CIVal = CI->getValue(); 6390 if (GetMinTrailingZeros(LHS) >= 6391 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 6392 // Build a plain add SCEV. 6393 return getAddExpr(LHS, getSCEV(CI), 6394 (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW)); 6395 } 6396 } 6397 break; 6398 6399 case Instruction::Xor: 6400 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6401 // If the RHS of xor is -1, then this is a not operation. 6402 if (CI->isMinusOne()) 6403 return getNotSCEV(getSCEV(BO->LHS)); 6404 6405 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 6406 // This is a variant of the check for xor with -1, and it handles 6407 // the case where instcombine has trimmed non-demanded bits out 6408 // of an xor with -1. 6409 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 6410 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 6411 if (LBO->getOpcode() == Instruction::And && 6412 LCI->getValue() == CI->getValue()) 6413 if (const SCEVZeroExtendExpr *Z = 6414 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 6415 Type *UTy = BO->LHS->getType(); 6416 const SCEV *Z0 = Z->getOperand(); 6417 Type *Z0Ty = Z0->getType(); 6418 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 6419 6420 // If C is a low-bits mask, the zero extend is serving to 6421 // mask off the high bits. Complement the operand and 6422 // re-apply the zext. 
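                // For example (illustrative), if %t = and i32 %x, 255 is
                // modeled as (zext i8 T to i32) for T = (trunc i32 %x to i8),
                // then "xor i32 %t, 255" is modeled below as
                // (zext i8 (-1 - T) to i32), i.e. the zext of T's complement.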
                if (CI->getValue().isMask(Z0TySize))
                  return getZeroExtendExpr(getNotSCEV(Z0), UTy);

                // If C is a single bit, it may be in the sign-bit position
                // before the zero-extend. In this case, represent the xor
                // using an add, which is equivalent, and re-apply the zext.
                APInt Trunc = CI->getValue().trunc(Z0TySize);
                if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
                    Trunc.isSignMask())
                  return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
                                           UTy);
              }
      }
      break;

    case Instruction::Shl:
      // Turn shift left of a constant amount into a multiply.
      if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
        uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();

        // If the shift count is not less than the bitwidth, the result of
        // the shift is undefined. Don't try to analyze it, because the
        // resolution chosen here may differ from the resolution chosen in
        // other parts of the compiler.
        if (SA->getValue().uge(BitWidth))
          break;

        // We can safely preserve the nuw flag in all cases. It's also safe to
        // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
        // requires special handling. It can be preserved as long as we're not
        // left shifting by bitwidth - 1.
        auto Flags = SCEV::FlagAnyWrap;
        if (BO->Op) {
          auto MulFlags = getNoWrapFlagsFromUB(BO->Op);
          if ((MulFlags & SCEV::FlagNSW) &&
              ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1)))
            Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW);
          if (MulFlags & SCEV::FlagNUW)
            Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW);
        }

        Constant *X = ConstantInt::get(
            getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
      }
      break;

    case Instruction::AShr: {
      // AShr X, C, where C is a constant.
      ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
      if (!CI)
        break;

      Type *OuterTy = BO->LHS->getType();
      uint64_t BitWidth = getTypeSizeInBits(OuterTy);
      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (CI->getValue().uge(BitWidth))
        break;

      if (CI->isZero())
        return getSCEV(BO->LHS); // shift by zero --> noop

      uint64_t AShrAmt = CI->getZExtValue();
      Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);

      Operator *L = dyn_cast<Operator>(BO->LHS);
      if (L && L->getOpcode() == Instruction::Shl) {
        // X = Shl A, n
        // Y = AShr X, m
        // Both n and m are constant.

        const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
        if (L->getOperand(1) == BO->RHS)
          // For a two-shift sext-inreg, i.e. n = m,
          // use sext(trunc(x)) as the SCEV expression.
          return getSignExtendExpr(
              getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);

        ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
        if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
          uint64_t ShlAmt = ShlAmtCI->getZExtValue();
          if (ShlAmt > AShrAmt) {
            // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
            // expression.
            // We already checked that ShlAmt < BitWidth, so the multiplier,
            // 1 << (ShlAmt - AShrAmt), fits into TruncTy since
            // ShlAmt - AShrAmt < BitWidth - AShrAmt, the width of TruncTy.
            APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
                                            ShlAmt - AShrAmt);
            return getSignExtendExpr(
                getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
                           getConstant(Mul)), OuterTy);
          }
        }
      }
      break;
    }
    }
  }

  switch (U->getOpcode()) {
  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
      // The NSW flag of a subtract does not always survive the conversion to
      // A + (-1)*B. By pushing sign extension onto its operands we are much
      // more likely to preserve NSW and allow later AddRec optimisations.
      //
      // NOTE: This is effectively duplicating this logic from getSignExtend:
      //   sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
      // but by that point the NSW information has potentially been lost.
      if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
        Type *Ty = U->getType();
        auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
        auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
        return getMinusSCEV(V1, V2, SCEV::FlagNSW);
      }
    }
    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::BitCast:
    // BitCasts are no-op casts so we just eliminate the cast.
    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
      return getSCEV(U->getOperand(0));
    break;

  // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
  // lead to pointer expressions which cannot safely be expanded to GEPs,
  // because ScalarEvolution doesn't respect the GEP aliasing rules when
  // simplifying integer expressions.

  case Instruction::GetElementPtr:
    return createNodeForGEP(cast<GEPOperator>(U));

  case Instruction::PHI:
    return createNodeForPHI(cast<PHINode>(U));

  case Instruction::Select:
    // U can also be a select constant expr, which we let fall through. Since
    // createNodeForSelect only works for a condition that is an `ICmpInst`,
    // and constant expressions cannot have instructions as operands, we would
    // have returned getUnknown for a select constant expression anyway.
    if (isa<Instruction>(U))
      return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
                                      U->getOperand(1), U->getOperand(2));
    break;

  case Instruction::Call:
  case Instruction::Invoke:
    if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
      return getSCEV(RV);
    break;
  }

  return getUnknown(V);
}

//===----------------------------------------------------------------------===//
//                   Iteration Count Computation Code
//

static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
  if (!ExitCount)
    return 0;

  ConstantInt *ExitConst = ExitCount->getValue();

  // Guard against huge trip counts.
  if (ExitConst->getValue().getActiveBits() > 32)
    return 0;

  // In case of integer overflow, this returns 0, which is correct.
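  // For example, an exit count of 7 yields a trip count of 8, while an exit
  // count of 0xFFFFFFFF (a trip count of 2^32) wraps the 32-bit addition to
  // 0, i.e. "unknown".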
6602 return ((unsigned)ExitConst->getZExtValue()) + 1; 6603 } 6604 6605 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) { 6606 if (BasicBlock *ExitingBB = L->getExitingBlock()) 6607 return getSmallConstantTripCount(L, ExitingBB); 6608 6609 // No trip count information for multiple exits. 6610 return 0; 6611 } 6612 6613 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L, 6614 BasicBlock *ExitingBlock) { 6615 assert(ExitingBlock && "Must pass a non-null exiting block!"); 6616 assert(L->isLoopExiting(ExitingBlock) && 6617 "Exiting block must actually branch out of the loop!"); 6618 const SCEVConstant *ExitCount = 6619 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock)); 6620 return getConstantTripCount(ExitCount); 6621 } 6622 6623 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) { 6624 const auto *MaxExitCount = 6625 dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L)); 6626 return getConstantTripCount(MaxExitCount); 6627 } 6628 6629 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) { 6630 if (BasicBlock *ExitingBB = L->getExitingBlock()) 6631 return getSmallConstantTripMultiple(L, ExitingBB); 6632 6633 // No trip multiple information for multiple exits. 6634 return 0; 6635 } 6636 6637 /// Returns the largest constant divisor of the trip count of this loop as a 6638 /// normal unsigned value, if possible. This means that the actual trip count is 6639 /// always a multiple of the returned value (don't forget the trip count could 6640 /// very well be zero as well!). 6641 /// 6642 /// Returns 1 if the trip count is unknown or not guaranteed to be the 6643 /// multiple of a constant (which is also the case if the trip count is simply 6644 /// constant, use getSmallConstantTripCount for that case), Will also return 1 6645 /// if the trip count is very large (>= 2^32). 6646 /// 6647 /// As explained in the comments for getSmallConstantTripCount, this assumes 6648 /// that control exits the loop via ExitingBlock. 6649 unsigned 6650 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L, 6651 BasicBlock *ExitingBlock) { 6652 assert(ExitingBlock && "Must pass a non-null exiting block!"); 6653 assert(L->isLoopExiting(ExitingBlock) && 6654 "Exiting block must actually branch out of the loop!"); 6655 const SCEV *ExitCount = getExitCount(L, ExitingBlock); 6656 if (ExitCount == getCouldNotCompute()) 6657 return 1; 6658 6659 // Get the trip count from the BE count by adding 1. 6660 const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType())); 6661 6662 const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr); 6663 if (!TC) 6664 // Attempt to factor more general cases. Returns the greatest power of 6665 // two divisor. If overflow happens, the trip count expression is still 6666 // divisible by the greatest power of 2 divisor returned. 6667 return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr)); 6668 6669 ConstantInt *Result = TC->getValue(); 6670 6671 // Guard against huge trip counts (this requires checking 6672 // for zero to handle the case where the trip count == -1 and the 6673 // addition wraps). 
  if (!Result || Result->getValue().getActiveBits() > 32 ||
      Result->getValue().getActiveBits() == 0)
    return 1;

  return (unsigned)Result->getZExtValue();
}

const SCEV *ScalarEvolution::getExitCount(const Loop *L,
                                          BasicBlock *ExitingBlock,
                                          ExitCountKind Kind) {
  switch (Kind) {
  case Exact:
    return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
  case ConstantMaximum:
    return getBackedgeTakenInfo(L).getMax(ExitingBlock, this);
  }
  llvm_unreachable("Invalid ExitCountKind!");
}

const SCEV *
ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
                                                 SCEVUnionPredicate &Preds) {
  return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
}

const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
                                                   ExitCountKind Kind) {
  switch (Kind) {
  case Exact:
    return getBackedgeTakenInfo(L).getExact(L, this);
  case ConstantMaximum:
    return getBackedgeTakenInfo(L).getMax(this);
  }
  llvm_unreachable("Invalid ExitCountKind!");
}

bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
  return getBackedgeTakenInfo(L).isMaxOrZero(this);
}

/// Push PHI nodes in the header of the given loop onto the given Worklist.
static void
PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
  BasicBlock *Header = L->getHeader();

  // Push all Loop-header PHIs onto the Worklist stack.
  for (PHINode &PN : Header->phis())
    Worklist.push_back(&PN);
}

const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
  auto &BTI = getBackedgeTakenInfo(L);
  if (BTI.hasFullInfo())
    return BTI;

  auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});

  if (!Pair.second)
    return Pair.first->second;

  BackedgeTakenInfo Result =
      computeBackedgeTakenCount(L, /*AllowPredicates=*/true);

  return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
}

const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
  // Initially insert an invalid entry for this loop. If the insertion
  // succeeds, proceed to actually compute a backedge-taken count and
  // update the value. The temporary CouldNotCompute value tells SCEV
  // code elsewhere that it shouldn't attempt to request a new
  // backedge-taken count, which could result in infinite recursion.
  std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
      BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
  if (!Pair.second)
    return Pair.first->second;

  // computeBackedgeTakenCount may allocate memory for its result. Inserting it
  // into the BackedgeTakenCounts map transfers ownership. Otherwise, the
  // result must be cleared in this scope.
  BackedgeTakenInfo Result = computeBackedgeTakenCount(L);

  // In a release build without statistics, the counters are otherwise unused;
  // the casts below suppress unused-variable warnings.
  (void)NumTripCountsComputed;
  (void)NumTripCountsNotComputed;
#if LLVM_ENABLE_STATS || !defined(NDEBUG)
  const SCEV *BEExact = Result.getExact(L, this);
  if (BEExact != getCouldNotCompute()) {
    assert(isLoopInvariant(BEExact, L) &&
           isLoopInvariant(Result.getMax(this), L) &&
           "Computed backedge-taken count isn't loop invariant for loop!");
    ++NumTripCountsComputed;
  }
  else if (Result.getMax(this) == getCouldNotCompute() &&
           isa<PHINode>(L->getHeader()->begin())) {
    // Only count loops that have phi nodes as not being computable.
    ++NumTripCountsNotComputed;
  }
#endif // LLVM_ENABLE_STATS || !defined(NDEBUG)

  // Now that we know more about the trip count for this loop, forget any
  // existing SCEV values for PHI nodes in this loop since they are only
  // conservative estimates made without the benefit of trip count
  // information. This is similar to the code in forgetLoop, except that
  // it handles SCEVUnknown PHI nodes specially.
  if (Result.hasAnyInfo()) {
    SmallVector<Instruction *, 16> Worklist;
    PushLoopPHIs(L, Worklist);

    SmallPtrSet<Instruction *, 8> Discovered;
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();

      ValueExprMapType::iterator It =
          ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        const SCEV *Old = It->second;

        // SCEVUnknown for a PHI either means that it has an unrecognized
        // structure, or it's a PHI that's in the process of being computed
        // by createNodeForPHI. In the former case, additional loop trip
        // count information isn't going to change anything. In the latter
        // case, createNodeForPHI will perform the necessary updates on its
        // own when it gets to that point.
        if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
          eraseValueFromMap(It->first);
          forgetMemoizedResults(Old);
        }
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      // Since we don't need to invalidate anything for correctness and we're
      // only invalidating to make SCEV's results more precise, we get to stop
      // early to avoid invalidating too much. This is especially important in
      // cases like:
      //
      //   %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
      //   loop0:
      //     %pn0 = phi
      //     ...
      //   loop1:
      //     %pn1 = phi
      //     ...
      //
      // where both loop0's and loop1's backedge-taken counts use the SCEV
      // expression for %v. If we didn't have the early stop below, then in
      // cases like the above, getBackedgeTakenInfo(loop1) would clear out the
      // trip count for loop0 and getBackedgeTakenInfo(loop0) would clear out
      // the trip count for loop1, effectively nullifying SCEV's trip count
      // cache.
      for (auto *U : I->users())
        if (auto *I = dyn_cast<Instruction>(U)) {
          auto *LoopForUser = LI.getLoopFor(I->getParent());
          if (LoopForUser && L->contains(LoopForUser) &&
              Discovered.insert(I).second)
            Worklist.push_back(I);
        }
    }
  }

  // Re-lookup the insert position, since the call to
  // computeBackedgeTakenCount above could result in a
  // recursive call to getBackedgeTakenInfo (on a different
  // loop), which would invalidate the iterator computed
  // earlier.
6841 return BackedgeTakenCounts.find(L)->second = std::move(Result); 6842 } 6843 6844 void ScalarEvolution::forgetAllLoops() { 6845 // This method is intended to forget all info about loops. It should 6846 // invalidate caches as if the following happened: 6847 // - The trip counts of all loops have changed arbitrarily 6848 // - Every llvm::Value has been updated in place to produce a different 6849 // result. 6850 BackedgeTakenCounts.clear(); 6851 PredicatedBackedgeTakenCounts.clear(); 6852 LoopPropertiesCache.clear(); 6853 ConstantEvolutionLoopExitValue.clear(); 6854 ValueExprMap.clear(); 6855 ValuesAtScopes.clear(); 6856 LoopDispositions.clear(); 6857 BlockDispositions.clear(); 6858 UnsignedRanges.clear(); 6859 SignedRanges.clear(); 6860 ExprValueMap.clear(); 6861 HasRecMap.clear(); 6862 MinTrailingZerosCache.clear(); 6863 PredicatedSCEVRewrites.clear(); 6864 } 6865 6866 void ScalarEvolution::forgetLoop(const Loop *L) { 6867 // Drop any stored trip count value. 6868 auto RemoveLoopFromBackedgeMap = 6869 [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) { 6870 auto BTCPos = Map.find(L); 6871 if (BTCPos != Map.end()) { 6872 BTCPos->second.clear(); 6873 Map.erase(BTCPos); 6874 } 6875 }; 6876 6877 SmallVector<const Loop *, 16> LoopWorklist(1, L); 6878 SmallVector<Instruction *, 32> Worklist; 6879 SmallPtrSet<Instruction *, 16> Visited; 6880 6881 // Iterate over all the loops and sub-loops to drop SCEV information. 6882 while (!LoopWorklist.empty()) { 6883 auto *CurrL = LoopWorklist.pop_back_val(); 6884 6885 RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL); 6886 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL); 6887 6888 // Drop information about predicated SCEV rewrites for this loop. 6889 for (auto I = PredicatedSCEVRewrites.begin(); 6890 I != PredicatedSCEVRewrites.end();) { 6891 std::pair<const SCEV *, const Loop *> Entry = I->first; 6892 if (Entry.second == CurrL) 6893 PredicatedSCEVRewrites.erase(I++); 6894 else 6895 ++I; 6896 } 6897 6898 auto LoopUsersItr = LoopUsers.find(CurrL); 6899 if (LoopUsersItr != LoopUsers.end()) { 6900 for (auto *S : LoopUsersItr->second) 6901 forgetMemoizedResults(S); 6902 LoopUsers.erase(LoopUsersItr); 6903 } 6904 6905 // Drop information about expressions based on loop-header PHIs. 6906 PushLoopPHIs(CurrL, Worklist); 6907 6908 while (!Worklist.empty()) { 6909 Instruction *I = Worklist.pop_back_val(); 6910 if (!Visited.insert(I).second) 6911 continue; 6912 6913 ValueExprMapType::iterator It = 6914 ValueExprMap.find_as(static_cast<Value *>(I)); 6915 if (It != ValueExprMap.end()) { 6916 eraseValueFromMap(It->first); 6917 forgetMemoizedResults(It->second); 6918 if (PHINode *PN = dyn_cast<PHINode>(I)) 6919 ConstantEvolutionLoopExitValue.erase(PN); 6920 } 6921 6922 PushDefUseChildren(I, Worklist); 6923 } 6924 6925 LoopPropertiesCache.erase(CurrL); 6926 // Forget all contained loops too, to avoid dangling entries in the 6927 // ValuesAtScopes map. 6928 LoopWorklist.append(CurrL->begin(), CurrL->end()); 6929 } 6930 } 6931 6932 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 6933 while (Loop *Parent = L->getParentLoop()) 6934 L = Parent; 6935 forgetLoop(L); 6936 } 6937 6938 void ScalarEvolution::forgetValue(Value *V) { 6939 Instruction *I = dyn_cast<Instruction>(V); 6940 if (!I) return; 6941 6942 // Drop information about expressions based on loop-header PHIs. 
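  // Any transitive user of V may have a memoized SCEV that is expressed in
  // terms of V, so walk the def-use chain. For example (illustrative),
  // forgetting %i must also forget "%j = add i32 %i, 1" and, transitively,
  // %j's users.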
6943 SmallVector<Instruction *, 16> Worklist; 6944 Worklist.push_back(I); 6945 6946 SmallPtrSet<Instruction *, 8> Visited; 6947 while (!Worklist.empty()) { 6948 I = Worklist.pop_back_val(); 6949 if (!Visited.insert(I).second) 6950 continue; 6951 6952 ValueExprMapType::iterator It = 6953 ValueExprMap.find_as(static_cast<Value *>(I)); 6954 if (It != ValueExprMap.end()) { 6955 eraseValueFromMap(It->first); 6956 forgetMemoizedResults(It->second); 6957 if (PHINode *PN = dyn_cast<PHINode>(I)) 6958 ConstantEvolutionLoopExitValue.erase(PN); 6959 } 6960 6961 PushDefUseChildren(I, Worklist); 6962 } 6963 } 6964 6965 void ScalarEvolution::forgetLoopDispositions(const Loop *L) { 6966 LoopDispositions.clear(); 6967 } 6968 6969 /// Get the exact loop backedge taken count considering all loop exits. A 6970 /// computable result can only be returned for loops with all exiting blocks 6971 /// dominating the latch. howFarToZero assumes that the limit of each loop test 6972 /// is never skipped. This is a valid assumption as long as the loop exits via 6973 /// that test. For precise results, it is the caller's responsibility to specify 6974 /// the relevant loop exiting block using getExact(ExitingBlock, SE). 6975 const SCEV * 6976 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, 6977 SCEVUnionPredicate *Preds) const { 6978 // If any exits were not computable, the loop is not computable. 6979 if (!isComplete() || ExitNotTaken.empty()) 6980 return SE->getCouldNotCompute(); 6981 6982 const BasicBlock *Latch = L->getLoopLatch(); 6983 // All exiting blocks we have collected must dominate the only backedge. 6984 if (!Latch) 6985 return SE->getCouldNotCompute(); 6986 6987 // All exiting blocks we have gathered dominate loop's latch, so exact trip 6988 // count is simply a minimum out of all these calculated exit counts. 6989 SmallVector<const SCEV *, 2> Ops; 6990 for (auto &ENT : ExitNotTaken) { 6991 const SCEV *BECount = ENT.ExactNotTaken; 6992 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!"); 6993 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) && 6994 "We should only have known counts for exiting blocks that dominate " 6995 "latch!"); 6996 6997 Ops.push_back(BECount); 6998 6999 if (Preds && !ENT.hasAlwaysTruePredicate()) 7000 Preds->add(ENT.Predicate.get()); 7001 7002 assert((Preds || ENT.hasAlwaysTruePredicate()) && 7003 "Predicate should be always true!"); 7004 } 7005 7006 return SE->getUMinFromMismatchedTypes(Ops); 7007 } 7008 7009 /// Get the exact not taken count for this loop exit. 7010 const SCEV * 7011 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock, 7012 ScalarEvolution *SE) const { 7013 for (auto &ENT : ExitNotTaken) 7014 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 7015 return ENT.ExactNotTaken; 7016 7017 return SE->getCouldNotCompute(); 7018 } 7019 7020 const SCEV * 7021 ScalarEvolution::BackedgeTakenInfo::getMax(BasicBlock *ExitingBlock, 7022 ScalarEvolution *SE) const { 7023 for (auto &ENT : ExitNotTaken) 7024 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 7025 return ENT.MaxNotTaken; 7026 7027 return SE->getCouldNotCompute(); 7028 } 7029 7030 /// getMax - Get the max backedge taken count for the loop. 
7031 const SCEV * 7032 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 7033 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 7034 return !ENT.hasAlwaysTruePredicate(); 7035 }; 7036 7037 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax()) 7038 return SE->getCouldNotCompute(); 7039 7040 assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) && 7041 "No point in having a non-constant max backedge taken count!"); 7042 return getMax(); 7043 } 7044 7045 bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const { 7046 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 7047 return !ENT.hasAlwaysTruePredicate(); 7048 }; 7049 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 7050 } 7051 7052 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 7053 ScalarEvolution *SE) const { 7054 if (getMax() && getMax() != SE->getCouldNotCompute() && 7055 SE->hasOperand(getMax(), S)) 7056 return true; 7057 7058 for (auto &ENT : ExitNotTaken) 7059 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 7060 SE->hasOperand(ENT.ExactNotTaken, S)) 7061 return true; 7062 7063 return false; 7064 } 7065 7066 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 7067 : ExactNotTaken(E), MaxNotTaken(E) { 7068 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7069 isa<SCEVConstant>(MaxNotTaken)) && 7070 "No point in having a non-constant max backedge taken count!"); 7071 } 7072 7073 ScalarEvolution::ExitLimit::ExitLimit( 7074 const SCEV *E, const SCEV *M, bool MaxOrZero, 7075 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 7076 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 7077 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 7078 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 7079 "Exact is not allowed to be less precise than Max"); 7080 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7081 isa<SCEVConstant>(MaxNotTaken)) && 7082 "No point in having a non-constant max backedge taken count!"); 7083 for (auto *PredSet : PredSetList) 7084 for (auto *P : *PredSet) 7085 addPredicate(P); 7086 } 7087 7088 ScalarEvolution::ExitLimit::ExitLimit( 7089 const SCEV *E, const SCEV *M, bool MaxOrZero, 7090 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 7091 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 7092 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7093 isa<SCEVConstant>(MaxNotTaken)) && 7094 "No point in having a non-constant max backedge taken count!"); 7095 } 7096 7097 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 7098 bool MaxOrZero) 7099 : ExitLimit(E, M, MaxOrZero, None) { 7100 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7101 isa<SCEVConstant>(MaxNotTaken)) && 7102 "No point in having a non-constant max backedge taken count!"); 7103 } 7104 7105 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 7106 /// computable exit into a persistent ExitNotTakenInfo array. 
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
    ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo>
        ExitCounts,
    bool Complete, const SCEV *MaxCount, bool MaxOrZero)
    : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) {
  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  ExitNotTaken.reserve(ExitCounts.size());
  std::transform(
      ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
      [&](const EdgeExitInfo &EEI) {
        BasicBlock *ExitBB = EEI.first;
        const ExitLimit &EL = EEI.second;
        if (EL.Predicates.empty())
          return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
                                  nullptr);

        std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
        for (auto *Pred : EL.Predicates)
          Predicate->add(Pred);

        return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
                                std::move(Predicate));
      });
  assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) &&
         "No point in having a non-constant max backedge taken count!");
}

/// Invalidate this result and free the ExitNotTakenInfo array.
void ScalarEvolution::BackedgeTakenInfo::clear() {
  ExitNotTaken.clear();
}

/// Compute the number of times the backedge of the specified loop will
/// execute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
                                           bool AllowPredicates) {
  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  SmallVector<EdgeExitInfo, 4> ExitCounts;
  bool CouldComputeBECount = true;
  BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
  const SCEV *MustExitMaxBECount = nullptr;
  const SCEV *MayExitMaxBECount = nullptr;
  bool MustExitMaxOrZero = false;

  // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
  // and compute maxBECount.
  // Do a union of all the predicates here.
  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitBB = ExitingBlocks[i];

    // We canonicalize untaken exits to br (constant); ignore them so that
    // proving an exit untaken doesn't negatively impact our ability to reason
    // about the loop as a whole.
    if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
      if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
        bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
        if ((ExitIfTrue && CI->isZero()) || (!ExitIfTrue && CI->isOne()))
          continue;
      }

    ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);

    assert((AllowPredicates || EL.Predicates.empty()) &&
           "Predicated exit limit when predicates are not allowed!");

    // 1. For each exit that can be computed, add an entry to ExitCounts.
    // CouldComputeBECount is true only if all exits can be computed.
    if (EL.ExactNotTaken == getCouldNotCompute())
      // We couldn't compute an exact value for this exit, so
      // we won't be able to compute an exact value for the loop.
      CouldComputeBECount = false;
    else
      ExitCounts.emplace_back(ExitBB, EL);

    // 2. Derive the loop's MaxBECount from each exit's max number of
    // non-exiting iterations. Partition the loop exits into two kinds:
    // LoopMustExits and LoopMayExits.
7189 // 7190 // If the exit dominates the loop latch, it is a LoopMustExit otherwise it 7191 // is a LoopMayExit. If any computable LoopMustExit is found, then 7192 // MaxBECount is the minimum EL.MaxNotTaken of computable 7193 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum 7194 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any 7195 // computable EL.MaxNotTaken. 7196 if (EL.MaxNotTaken != getCouldNotCompute() && Latch && 7197 DT.dominates(ExitBB, Latch)) { 7198 if (!MustExitMaxBECount) { 7199 MustExitMaxBECount = EL.MaxNotTaken; 7200 MustExitMaxOrZero = EL.MaxOrZero; 7201 } else { 7202 MustExitMaxBECount = 7203 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken); 7204 } 7205 } else if (MayExitMaxBECount != getCouldNotCompute()) { 7206 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute()) 7207 MayExitMaxBECount = EL.MaxNotTaken; 7208 else { 7209 MayExitMaxBECount = 7210 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken); 7211 } 7212 } 7213 } 7214 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : 7215 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); 7216 // The loop backedge will be taken the maximum or zero times if there's 7217 // a single exit that must be taken the maximum or zero times. 7218 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1); 7219 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount, 7220 MaxBECount, MaxOrZero); 7221 } 7222 7223 ScalarEvolution::ExitLimit 7224 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, 7225 bool AllowPredicates) { 7226 assert(L->contains(ExitingBlock) && "Exit count for non-loop block?"); 7227 // If our exiting block does not dominate the latch, then its connection with 7228 // loop's exit limit may be far from trivial. 7229 const BasicBlock *Latch = L->getLoopLatch(); 7230 if (!Latch || !DT.dominates(ExitingBlock, Latch)) 7231 return getCouldNotCompute(); 7232 7233 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 7234 Instruction *Term = ExitingBlock->getTerminator(); 7235 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 7236 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 7237 bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); 7238 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) && 7239 "It should have one successor in loop and one exit block!"); 7240 // Proceed to the next level to examine the exit condition expression. 7241 return computeExitLimitFromCond( 7242 L, BI->getCondition(), ExitIfTrue, 7243 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 7244 } 7245 7246 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) { 7247 // For switch, make sure that there is a single exit from the loop. 7248 BasicBlock *Exit = nullptr; 7249 for (auto *SBB : successors(ExitingBlock)) 7250 if (!L->contains(SBB)) { 7251 if (Exit) // Multiple exit successors. 
7252 return getCouldNotCompute(); 7253 Exit = SBB; 7254 } 7255 assert(Exit && "Exiting block must have at least one exit"); 7256 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 7257 /*ControlsExit=*/IsOnlyExit); 7258 } 7259 7260 return getCouldNotCompute(); 7261 } 7262 7263 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 7264 const Loop *L, Value *ExitCond, bool ExitIfTrue, 7265 bool ControlsExit, bool AllowPredicates) { 7266 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 7267 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 7268 ControlsExit, AllowPredicates); 7269 } 7270 7271 Optional<ScalarEvolution::ExitLimit> 7272 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 7273 bool ExitIfTrue, bool ControlsExit, 7274 bool AllowPredicates) { 7275 (void)this->L; 7276 (void)this->ExitIfTrue; 7277 (void)this->AllowPredicates; 7278 7279 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7280 this->AllowPredicates == AllowPredicates && 7281 "Variance in assumed invariant key components!"); 7282 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 7283 if (Itr == TripCountMap.end()) 7284 return None; 7285 return Itr->second; 7286 } 7287 7288 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 7289 bool ExitIfTrue, 7290 bool ControlsExit, 7291 bool AllowPredicates, 7292 const ExitLimit &EL) { 7293 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7294 this->AllowPredicates == AllowPredicates && 7295 "Variance in assumed invariant key components!"); 7296 7297 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 7298 assert(InsertResult.second && "Expected successful insertion!"); 7299 (void)InsertResult; 7300 (void)ExitIfTrue; 7301 } 7302 7303 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 7304 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7305 bool ControlsExit, bool AllowPredicates) { 7306 7307 if (auto MaybeEL = 7308 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 7309 return *MaybeEL; 7310 7311 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 7312 ControlsExit, AllowPredicates); 7313 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 7314 return EL; 7315 } 7316 7317 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 7318 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7319 bool ControlsExit, bool AllowPredicates) { 7320 // Check if the controlling expression for this loop is an And or Or. 7321 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 7322 if (BO->getOpcode() == Instruction::And) { 7323 // Recurse on the operands of the and. 7324 bool EitherMayExit = !ExitIfTrue; 7325 ExitLimit EL0 = computeExitLimitFromCondCached( 7326 Cache, L, BO->getOperand(0), ExitIfTrue, 7327 ControlsExit && !EitherMayExit, AllowPredicates); 7328 ExitLimit EL1 = computeExitLimitFromCondCached( 7329 Cache, L, BO->getOperand(1), ExitIfTrue, 7330 ControlsExit && !EitherMayExit, AllowPredicates); 7331 // Be robust against unsimplified IR for the form "and i1 X, true" 7332 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) 7333 return CI->isOne() ? EL0 : EL1; 7334 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(0))) 7335 return CI->isOne() ? 
EL1 : EL0; 7336 const SCEV *BECount = getCouldNotCompute(); 7337 const SCEV *MaxBECount = getCouldNotCompute(); 7338 if (EitherMayExit) { 7339 // Both conditions must be true for the loop to continue executing. 7340 // Choose the less conservative count. 7341 if (EL0.ExactNotTaken == getCouldNotCompute() || 7342 EL1.ExactNotTaken == getCouldNotCompute()) 7343 BECount = getCouldNotCompute(); 7344 else 7345 BECount = 7346 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7347 if (EL0.MaxNotTaken == getCouldNotCompute()) 7348 MaxBECount = EL1.MaxNotTaken; 7349 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7350 MaxBECount = EL0.MaxNotTaken; 7351 else 7352 MaxBECount = 7353 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7354 } else { 7355 // Both conditions must be true at the same time for the loop to exit. 7356 // For now, be conservative. 7357 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7358 MaxBECount = EL0.MaxNotTaken; 7359 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7360 BECount = EL0.ExactNotTaken; 7361 } 7362 7363 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 7364 // to be more aggressive when computing BECount than when computing 7365 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 7366 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 7367 // to not. 7368 if (isa<SCEVCouldNotCompute>(MaxBECount) && 7369 !isa<SCEVCouldNotCompute>(BECount)) 7370 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 7371 7372 return ExitLimit(BECount, MaxBECount, false, 7373 {&EL0.Predicates, &EL1.Predicates}); 7374 } 7375 if (BO->getOpcode() == Instruction::Or) { 7376 // Recurse on the operands of the or. 7377 bool EitherMayExit = ExitIfTrue; 7378 ExitLimit EL0 = computeExitLimitFromCondCached( 7379 Cache, L, BO->getOperand(0), ExitIfTrue, 7380 ControlsExit && !EitherMayExit, AllowPredicates); 7381 ExitLimit EL1 = computeExitLimitFromCondCached( 7382 Cache, L, BO->getOperand(1), ExitIfTrue, 7383 ControlsExit && !EitherMayExit, AllowPredicates); 7384 // Be robust against unsimplified IR for the form "or i1 X, true" 7385 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) 7386 return CI->isZero() ? EL0 : EL1; 7387 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(0))) 7388 return CI->isZero() ? EL1 : EL0; 7389 const SCEV *BECount = getCouldNotCompute(); 7390 const SCEV *MaxBECount = getCouldNotCompute(); 7391 if (EitherMayExit) { 7392 // Both conditions must be false for the loop to continue executing. 7393 // Choose the less conservative count. 7394 if (EL0.ExactNotTaken == getCouldNotCompute() || 7395 EL1.ExactNotTaken == getCouldNotCompute()) 7396 BECount = getCouldNotCompute(); 7397 else 7398 BECount = 7399 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7400 if (EL0.MaxNotTaken == getCouldNotCompute()) 7401 MaxBECount = EL1.MaxNotTaken; 7402 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7403 MaxBECount = EL0.MaxNotTaken; 7404 else 7405 MaxBECount = 7406 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7407 } else { 7408 // Both conditions must be false at the same time for the loop to exit. 7409 // For now, be conservative. 7410 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7411 MaxBECount = EL0.MaxNotTaken; 7412 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7413 BECount = EL0.ExactNotTaken; 7414 } 7415 // There are cases (e.g. 
PR26207) where computeExitLimitFromCond is able 7416 // to be more aggressive when computing BECount than when computing 7417 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 7418 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 7419 // to not. 7420 if (isa<SCEVCouldNotCompute>(MaxBECount) && 7421 !isa<SCEVCouldNotCompute>(BECount)) 7422 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 7423 7424 return ExitLimit(BECount, MaxBECount, false, 7425 {&EL0.Predicates, &EL1.Predicates}); 7426 } 7427 } 7428 7429 // With an icmp, it may be feasible to compute an exact backedge-taken count. 7430 // Proceed to the next level to examine the icmp. 7431 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 7432 ExitLimit EL = 7433 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 7434 if (EL.hasFullInfo() || !AllowPredicates) 7435 return EL; 7436 7437 // Try again, but use SCEV predicates this time. 7438 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 7439 /*AllowPredicates=*/true); 7440 } 7441 7442 // Check for a constant condition. These are normally stripped out by 7443 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 7444 // preserve the CFG and is temporarily leaving constant conditions 7445 // in place. 7446 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 7447 if (ExitIfTrue == !CI->getZExtValue()) 7448 // The backedge is always taken. 7449 return getCouldNotCompute(); 7450 else 7451 // The backedge is never taken. 7452 return getZero(CI->getType()); 7453 } 7454 7455 // If it's not an integer or pointer comparison then compute it the hard way. 7456 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7457 } 7458 7459 ScalarEvolution::ExitLimit 7460 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 7461 ICmpInst *ExitCond, 7462 bool ExitIfTrue, 7463 bool ControlsExit, 7464 bool AllowPredicates) { 7465 // If the condition was exit on true, convert the condition to exit on false 7466 ICmpInst::Predicate Pred; 7467 if (!ExitIfTrue) 7468 Pred = ExitCond->getPredicate(); 7469 else 7470 Pred = ExitCond->getInversePredicate(); 7471 const ICmpInst::Predicate OriginalPred = Pred; 7472 7473 // Handle common loops like: for (X = "string"; *X; ++X) 7474 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 7475 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 7476 ExitLimit ItCnt = 7477 computeLoadConstantCompareExitLimit(LI, RHS, L, Pred); 7478 if (ItCnt.hasAnyInfo()) 7479 return ItCnt; 7480 } 7481 7482 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 7483 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 7484 7485 // Try to evaluate any dependencies out of the loop. 7486 LHS = getSCEVAtScope(LHS, L); 7487 RHS = getSCEVAtScope(RHS, L); 7488 7489 // At this point, we would like to compute how many iterations of the 7490 // loop the predicate will return true for these inputs. 7491 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 7492 // If there is a loop-invariant, force it into the RHS. 7493 std::swap(LHS, RHS); 7494 Pred = ICmpInst::getSwappedPredicate(Pred); 7495 } 7496 7497 // Simplify the operands before analyzing them. 7498 (void)SimplifyICmpOperands(Pred, LHS, RHS); 7499 7500 // If we have a comparison of a chrec against a constant, try to use value 7501 // ranges to answer this query. 
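  // For example, if the AddRec is {0,+,1} and the simplified predicate is
  // "slt 100", makeExactICmpRegion yields the region [INT_MIN, 100), and
  // getNumIterationsInRange counts how many iterations the chrec remains
  // inside that region.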
7502 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 7503 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 7504 if (AddRec->getLoop() == L) { 7505 // Form the constant range. 7506 ConstantRange CompRange = 7507 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 7508 7509 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 7510 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 7511 } 7512 7513 switch (Pred) { 7514 case ICmpInst::ICMP_NE: { // while (X != Y) 7515 // Convert to: while (X-Y != 0) 7516 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 7517 AllowPredicates); 7518 if (EL.hasAnyInfo()) return EL; 7519 break; 7520 } 7521 case ICmpInst::ICMP_EQ: { // while (X == Y) 7522 // Convert to: while (X-Y == 0) 7523 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 7524 if (EL.hasAnyInfo()) return EL; 7525 break; 7526 } 7527 case ICmpInst::ICMP_SLT: 7528 case ICmpInst::ICMP_ULT: { // while (X < Y) 7529 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 7530 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 7531 AllowPredicates); 7532 if (EL.hasAnyInfo()) return EL; 7533 break; 7534 } 7535 case ICmpInst::ICMP_SGT: 7536 case ICmpInst::ICMP_UGT: { // while (X > Y) 7537 bool IsSigned = Pred == ICmpInst::ICMP_SGT; 7538 ExitLimit EL = 7539 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 7540 AllowPredicates); 7541 if (EL.hasAnyInfo()) return EL; 7542 break; 7543 } 7544 default: 7545 break; 7546 } 7547 7548 auto *ExhaustiveCount = 7549 computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7550 7551 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 7552 return ExhaustiveCount; 7553 7554 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 7555 ExitCond->getOperand(1), L, OriginalPred); 7556 } 7557 7558 ScalarEvolution::ExitLimit 7559 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 7560 SwitchInst *Switch, 7561 BasicBlock *ExitingBlock, 7562 bool ControlsExit) { 7563 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 7564 7565 // Give up if the exit is the default dest of a switch. 7566 if (Switch->getDefaultDest() == ExitingBlock) 7567 return getCouldNotCompute(); 7568 7569 assert(L->contains(Switch->getDefaultDest()) && 7570 "Default case must not exit the loop!"); 7571 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 7572 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 7573 7574 // while (X != Y) --> while (X-Y != 0) 7575 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 7576 if (EL.hasAnyInfo()) 7577 return EL; 7578 7579 return getCouldNotCompute(); 7580 } 7581 7582 static ConstantInt * 7583 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 7584 ScalarEvolution &SE) { 7585 const SCEV *InVal = SE.getConstant(C); 7586 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 7587 assert(isa<SCEVConstant>(Val) && 7588 "Evaluation of SCEV at constant didn't fold correctly?"); 7589 return cast<SCEVConstant>(Val)->getValue(); 7590 } 7591 7592 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 7593 /// compute the backedge execution count. 7594 ScalarEvolution::ExitLimit 7595 ScalarEvolution::computeLoadConstantCompareExitLimit( 7596 LoadInst *LI, 7597 Constant *RHS, 7598 const Loop *L, 7599 ICmpInst::Predicate predicate) { 7600 if (LI->isVolatile()) return getCouldNotCompute(); 7601 7602 // Check to see if the loaded pointer is a getelementptr of a global. 
7603 // TODO: Use SCEV instead of manually grubbing with GEPs.
7604 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
7605 if (!GEP) return getCouldNotCompute();
7606
7607 // Make sure that it is really a constant global we are gepping, with an
7608 // initializer, and make sure the first IDX is really 0.
7609 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
7610 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
7611 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
7612 !cast<Constant>(GEP->getOperand(1))->isNullValue())
7613 return getCouldNotCompute();
7614
7615 // Okay, we allow one non-constant index into the GEP instruction.
7616 Value *VarIdx = nullptr;
7617 std::vector<Constant*> Indexes;
7618 unsigned VarIdxNum = 0;
7619 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
7620 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
7621 Indexes.push_back(CI);
7622 } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
7623 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
7624 VarIdx = GEP->getOperand(i);
7625 VarIdxNum = i-2;
7626 Indexes.push_back(nullptr);
7627 }
7628
7629 // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
7630 if (!VarIdx)
7631 return getCouldNotCompute();
7632
7633 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
7634 // Check to see if X is a loop-variant value now.
7635 const SCEV *Idx = getSCEV(VarIdx);
7636 Idx = getSCEVAtScope(Idx, L);
7637
7638 // We can only recognize very limited forms of loop index expressions, in
7639 // particular, only affine AddRec's like {C1,+,C2}.
7640 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
7641 if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
7642 !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
7643 !isa<SCEVConstant>(IdxExpr->getOperand(1)))
7644 return getCouldNotCompute();
7645
7646 unsigned MaxSteps = MaxBruteForceIterations;
7647 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
7648 ConstantInt *ItCst = ConstantInt::get(
7649 cast<IntegerType>(IdxExpr->getType()), IterationNum);
7650 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
7651
7652 // Form the GEP offset.
7653 Indexes[VarIdxNum] = Val;
7654
7655 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
7656 Indexes);
7657 if (!Result) break; // Cannot compute!
7658
7659 // Evaluate the condition for this iteration.
7660 Result = ConstantExpr::getICmp(predicate, Result, RHS);
7661 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
7662 if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
7663 ++NumArrayLenItCounts;
7664 return getConstant(ItCst); // Found terminating iteration!
7665 }
7666 }
7667 return getCouldNotCompute();
7668 }
7669
7670 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
7671 Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
7672 ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
7673 if (!RHS)
7674 return getCouldNotCompute();
7675
7676 const BasicBlock *Latch = L->getLoopLatch();
7677 if (!Latch)
7678 return getCouldNotCompute();
7679
7680 const BasicBlock *Predecessor = L->getLoopPredecessor();
7681 if (!Predecessor)
7682 return getCouldNotCompute();
7683
7684 // Return true if V is of the form "LHS `shift_op` <positive constant>".
7685 // Return LHS in OutLHS and shift_op in OutOpCode.
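// Illustrative example (hypothetical IR, not from the original source):
// "%s = lshr i32 %a, 3" matches with OutLHS = %a and OutOpCode =
// Instruction::LShr. A shift by zero or by a non-constant amount is rejected,
// since the stabilization reasoning below needs every iteration to make
// progress.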
7686 auto MatchPositiveShift =
7687 [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
7688
7689 using namespace PatternMatch;
7690
7691 ConstantInt *ShiftAmt;
7692 if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7693 OutOpCode = Instruction::LShr;
7694 else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7695 OutOpCode = Instruction::AShr;
7696 else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7697 OutOpCode = Instruction::Shl;
7698 else
7699 return false;
7700
7701 return ShiftAmt->getValue().isStrictlyPositive();
7702 };
7703
7704 // Recognize a "shift recurrence", where the value being matched is either
7705 // %iv or %iv.shifted in
7706 // loop:
7707 // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
7708 // %iv.shifted = lshr i32 %iv, <positive constant>
7709 //
7710 // Return true on a successful match. Return the corresponding PHI node (%iv
7711 // above) in PNOut and the opcode of the shift operation in OpCodeOut.
7712 auto MatchShiftRecurrence =
7713 [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
7714 Optional<Instruction::BinaryOps> PostShiftOpCode;
7715
7716 {
7717 Instruction::BinaryOps OpC;
7718 Value *V;
7719
7720 // If we encounter a shift instruction, "peel off" the shift operation,
7721 // and remember that we did so. Later when we inspect %iv's backedge
7722 // value, we will make sure that the backedge value uses the same
7723 // operation.
7724 //
7725 // Note: the peeled shift operation does not have to be the same
7726 // instruction as the one feeding into the PHI's backedge value. We only
7727 // really care about it being the same *kind* of shift instruction --
7728 // that's all that is required for our later inferences to hold.
7729 if (MatchPositiveShift(LHS, V, OpC)) {
7730 PostShiftOpCode = OpC;
7731 LHS = V;
7732 }
7733 }
7734
7735 PNOut = dyn_cast<PHINode>(LHS);
7736 if (!PNOut || PNOut->getParent() != L->getHeader())
7737 return false;
7738
7739 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
7740 Value *OpLHS;
7741
7742 return
7743 // The backedge value for the PHI node must be a shift by a positive
7744 // amount
7745 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
7746
7747 // of the PHI node itself
7748 OpLHS == PNOut &&
7749
7750 // and the kind of shift should match the kind of shift we peeled
7751 // off, if any.
7752 (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
7753 };
7754
7755 PHINode *PN;
7756 Instruction::BinaryOps OpCode;
7757 if (!MatchShiftRecurrence(LHS, PN, OpCode))
7758 return getCouldNotCompute();
7759
7760 const DataLayout &DL = getDataLayout();
7761
7762 // The key rationale for this optimization is that for some kinds of shift
7763 // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
7764 // within a finite number of iterations. If the condition guarding the
7765 // backedge (in the sense that the backedge is taken if the condition is true)
7766 // is false for the value the shift recurrence stabilizes to, then we know
7767 // that the backedge is taken only a finite number of times.
7768
7769 ConstantInt *StableValue = nullptr;
7770 switch (OpCode) {
7771 default:
7772 llvm_unreachable("Impossible case!");
7773
7774 case Instruction::AShr: {
7775 // {K,ashr,<positive-constant>} stabilizes to 0 when K is non-negative and
7776 // to -1 when K is negative, in at most bitwidth(K) iterations.
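// For example (illustrative values): {-20,ashr,2} produces -20, -5, -2, -1,
// -1, ... while {20,ashr,2} produces 20, 5, 1, 0, 0, ..., so the recurrence
// settles on -1 for negative starting values and on 0 for non-negative ones.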
7777 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 7778 KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr, 7779 Predecessor->getTerminator(), &DT); 7780 auto *Ty = cast<IntegerType>(RHS->getType()); 7781 if (Known.isNonNegative()) 7782 StableValue = ConstantInt::get(Ty, 0); 7783 else if (Known.isNegative()) 7784 StableValue = ConstantInt::get(Ty, -1, true); 7785 else 7786 return getCouldNotCompute(); 7787 7788 break; 7789 } 7790 case Instruction::LShr: 7791 case Instruction::Shl: 7792 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 7793 // stabilize to 0 in at most bitwidth(K) iterations. 7794 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 7795 break; 7796 } 7797 7798 auto *Result = 7799 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 7800 assert(Result->getType()->isIntegerTy(1) && 7801 "Otherwise cannot be an operand to a branch instruction"); 7802 7803 if (Result->isZeroValue()) { 7804 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 7805 const SCEV *UpperBound = 7806 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 7807 return ExitLimit(getCouldNotCompute(), UpperBound, false); 7808 } 7809 7810 return getCouldNotCompute(); 7811 } 7812 7813 /// Return true if we can constant fold an instruction of the specified type, 7814 /// assuming that all operands were constants. 7815 static bool CanConstantFold(const Instruction *I) { 7816 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 7817 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 7818 isa<LoadInst>(I) || isa<ExtractValueInst>(I)) 7819 return true; 7820 7821 if (const CallInst *CI = dyn_cast<CallInst>(I)) 7822 if (const Function *F = CI->getCalledFunction()) 7823 return canConstantFoldCallTo(CI, F); 7824 return false; 7825 } 7826 7827 /// Determine whether this instruction can constant evolve within this loop 7828 /// assuming its operands can all constant evolve. 7829 static bool canConstantEvolve(Instruction *I, const Loop *L) { 7830 // An instruction outside of the loop can't be derived from a loop PHI. 7831 if (!L->contains(I)) return false; 7832 7833 if (isa<PHINode>(I)) { 7834 // We don't currently keep track of the control flow needed to evaluate 7835 // PHIs, so we cannot handle PHIs inside of loops. 7836 return L->getHeader() == I->getParent(); 7837 } 7838 7839 // If we won't be able to constant fold this expression even if the operands 7840 // are constants, bail early. 7841 return CanConstantFold(I); 7842 } 7843 7844 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 7845 /// recursing through each instruction operand until reaching a loop header phi. 7846 static PHINode * 7847 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 7848 DenseMap<Instruction *, PHINode *> &PHIMap, 7849 unsigned Depth) { 7850 if (Depth > MaxConstantEvolvingDepth) 7851 return nullptr; 7852 7853 // Otherwise, we can evaluate this instruction if all of its operands are 7854 // constant or derived from a PHI node themselves. 7855 PHINode *PHI = nullptr; 7856 for (Value *Op : UseInst->operands()) { 7857 if (isa<Constant>(Op)) continue; 7858 7859 Instruction *OpInst = dyn_cast<Instruction>(Op); 7860 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 7861 7862 PHINode *P = dyn_cast<PHINode>(OpInst); 7863 if (!P) 7864 // If this operand is already visited, reuse the prior result. 
7865 // We may have P != PHI if this is the deepest point at which the
7866 // inconsistent paths meet.
7867 P = PHIMap.lookup(OpInst);
7868 if (!P) {
7869 // Recurse and memoize the results, whether a phi is found or not.
7870 // This recursive call invalidates pointers into PHIMap.
7871 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
7872 PHIMap[OpInst] = P;
7873 }
7874 if (!P)
7875 return nullptr; // Not evolving from PHI
7876 if (PHI && PHI != P)
7877 return nullptr; // Evolving from multiple different PHIs.
7878 PHI = P;
7879 }
7880 // This is an expression evolving from a constant PHI!
7881 return PHI;
7882 }
7883
7884 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
7885 /// in the loop that V is derived from. We allow arbitrary operations along the
7886 /// way, but the operands of an operation must either be constants or a value
7887 /// derived from a constant PHI. If this expression does not fit with these
7888 /// constraints, return null.
7889 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
7890 Instruction *I = dyn_cast<Instruction>(V);
7891 if (!I || !canConstantEvolve(I, L)) return nullptr;
7892
7893 if (PHINode *PN = dyn_cast<PHINode>(I))
7894 return PN;
7895
7896 // Record non-constant instructions contained by the loop.
7897 DenseMap<Instruction *, PHINode *> PHIMap;
7898 return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
7899 }
7900
7901 /// EvaluateExpression - Given an expression that passes the
7902 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
7903 /// in the loop has the value PHIVal. If we can't fold this expression for some
7904 /// reason, return null.
7905 static Constant *EvaluateExpression(Value *V, const Loop *L,
7906 DenseMap<Instruction *, Constant *> &Vals,
7907 const DataLayout &DL,
7908 const TargetLibraryInfo *TLI) {
7909 // Convenient constant check, but redundant for recursive calls.
7910 if (Constant *C = dyn_cast<Constant>(V)) return C;
7911 Instruction *I = dyn_cast<Instruction>(V);
7912 if (!I) return nullptr;
7913
7914 if (Constant *C = Vals.lookup(I)) return C;
7915
7916 // An instruction inside the loop depends on a value outside the loop that we
7917 // weren't given a mapping for, or a value such as a call inside the loop.
7918 if (!canConstantEvolve(I, L)) return nullptr;
7919
7920 // An unmapped PHI can be due to a branch or another loop inside this loop,
7921 // or due to this not being the initial iteration through a loop where we
7922 // couldn't compute the evolution of this particular PHI last time.
7923 if (isa<PHINode>(I)) return nullptr; 7924 7925 std::vector<Constant*> Operands(I->getNumOperands()); 7926 7927 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 7928 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i)); 7929 if (!Operand) { 7930 Operands[i] = dyn_cast<Constant>(I->getOperand(i)); 7931 if (!Operands[i]) return nullptr; 7932 continue; 7933 } 7934 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI); 7935 Vals[Operand] = C; 7936 if (!C) return nullptr; 7937 Operands[i] = C; 7938 } 7939 7940 if (CmpInst *CI = dyn_cast<CmpInst>(I)) 7941 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 7942 Operands[1], DL, TLI); 7943 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 7944 if (!LI->isVolatile()) 7945 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 7946 } 7947 return ConstantFoldInstOperands(I, Operands, DL, TLI); 7948 } 7949 7950 7951 // If every incoming value to PN except the one for BB is a specific Constant, 7952 // return that, else return nullptr. 7953 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 7954 Constant *IncomingVal = nullptr; 7955 7956 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 7957 if (PN->getIncomingBlock(i) == BB) 7958 continue; 7959 7960 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 7961 if (!CurrentVal) 7962 return nullptr; 7963 7964 if (IncomingVal != CurrentVal) { 7965 if (IncomingVal) 7966 return nullptr; 7967 IncomingVal = CurrentVal; 7968 } 7969 } 7970 7971 return IncomingVal; 7972 } 7973 7974 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 7975 /// in the header of its containing loop, we know the loop executes a 7976 /// constant number of times, and the PHI node is just a recurrence 7977 /// involving constants, fold it. 7978 Constant * 7979 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 7980 const APInt &BEs, 7981 const Loop *L) { 7982 auto I = ConstantEvolutionLoopExitValue.find(PN); 7983 if (I != ConstantEvolutionLoopExitValue.end()) 7984 return I->second; 7985 7986 if (BEs.ugt(MaxBruteForceIterations)) 7987 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 7988 7989 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 7990 7991 DenseMap<Instruction *, Constant *> CurrentIterVals; 7992 BasicBlock *Header = L->getHeader(); 7993 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 7994 7995 BasicBlock *Latch = L->getLoopLatch(); 7996 if (!Latch) 7997 return nullptr; 7998 7999 for (PHINode &PHI : Header->phis()) { 8000 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 8001 CurrentIterVals[&PHI] = StartCST; 8002 } 8003 if (!CurrentIterVals.count(PN)) 8004 return RetVal = nullptr; 8005 8006 Value *BEValue = PN->getIncomingValueForBlock(Latch); 8007 8008 // Execute the loop symbolically to determine the exit value. 8009 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 8010 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 8011 8012 unsigned NumIterations = BEs.getZExtValue(); // must be in range 8013 unsigned IterationNum = 0; 8014 const DataLayout &DL = getDataLayout(); 8015 for (; ; ++IterationNum) { 8016 if (IterationNum == NumIterations) 8017 return RetVal = CurrentIterVals[PN]; // Got exit value! 8018 8019 // Compute the value of the PHIs for the next iteration. 8020 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 
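// Illustrative sketch (hypothetical IR, not from the original source): for a
// header PHI "%i = phi i32 [ 0, %ph ], [ %i.next, %latch ]" with
// "%i.next = add i32 %i, 1" and BEs = 3, successive evaluations here yield
// 1, 2, 3, and 3 is returned once IterationNum reaches BEs.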
8021 DenseMap<Instruction *, Constant *> NextIterVals;
8022 Constant *NextPHI =
8023 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8024 if (!NextPHI)
8025 return nullptr; // Couldn't evaluate!
8026 NextIterVals[PN] = NextPHI;
8027
8028 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
8029
8030 // Also evaluate the other PHI nodes. However, we don't get to stop if we
8031 // cease to be able to evaluate one of them or if they stop evolving,
8032 // because that doesn't necessarily prevent us from computing PN.
8033 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
8034 for (const auto &I : CurrentIterVals) {
8035 PHINode *PHI = dyn_cast<PHINode>(I.first);
8036 if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
8037 PHIsToCompute.emplace_back(PHI, I.second);
8038 }
8039 // We use two distinct loops because EvaluateExpression may invalidate any
8040 // iterators into CurrentIterVals.
8041 for (const auto &I : PHIsToCompute) {
8042 PHINode *PHI = I.first;
8043 Constant *&NextPHI = NextIterVals[PHI];
8044 if (!NextPHI) { // Not already computed.
8045 Value *BEValue = PHI->getIncomingValueForBlock(Latch);
8046 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8047 }
8048 if (NextPHI != I.second)
8049 StoppedEvolving = false;
8050 }
8051
8052 // If all entries in CurrentIterVals == NextIterVals then we can stop
8053 // iterating; the loop can't continue to change.
8054 if (StoppedEvolving)
8055 return RetVal = CurrentIterVals[PN];
8056
8057 CurrentIterVals.swap(NextIterVals);
8058 }
8059 }
8060
8061 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
8062 Value *Cond,
8063 bool ExitWhen) {
8064 PHINode *PN = getConstantEvolvingPHI(Cond, L);
8065 if (!PN) return getCouldNotCompute();
8066
8067 // If the loop is canonicalized, the PHI will have exactly two entries.
8068 // That's the only form we support here.
8069 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
8070
8071 DenseMap<Instruction *, Constant *> CurrentIterVals;
8072 BasicBlock *Header = L->getHeader();
8073 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
8074
8075 BasicBlock *Latch = L->getLoopLatch();
8076 assert(Latch && "Should follow from NumIncomingValues == 2!");
8077
8078 for (PHINode &PHI : Header->phis()) {
8079 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
8080 CurrentIterVals[&PHI] = StartCST;
8081 }
8082 if (!CurrentIterVals.count(PN))
8083 return getCouldNotCompute();
8084
8085 // Okay, we found a PHI node that defines the trip count of this loop. Execute
8086 // the loop symbolically to determine when the condition gets a value of
8087 // "ExitWhen".
8088 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
8089 const DataLayout &DL = getDataLayout();
8090 for (unsigned IterationNum = 0; IterationNum != MaxIterations; ++IterationNum) {
8091 auto *CondVal = dyn_cast_or_null<ConstantInt>(
8092 EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));
8093
8094 // Couldn't symbolically evaluate.
8095 if (!CondVal) return getCouldNotCompute();
8096
8097 if (CondVal->getValue() == uint64_t(ExitWhen)) {
8098 ++NumBruteForceTripCountsComputed;
8099 return getConstant(Type::getInt32Ty(getContext()), IterationNum);
8100 }
8101
8102 // Update all the PHI nodes for the next iteration.
8103 DenseMap<Instruction *, Constant *> NextIterVals;
8104
8105 // Create a list of which PHIs we need to compute.
We want to do this before 8106 // calling EvaluateExpression on them because that may invalidate iterators 8107 // into CurrentIterVals. 8108 SmallVector<PHINode *, 8> PHIsToCompute; 8109 for (const auto &I : CurrentIterVals) { 8110 PHINode *PHI = dyn_cast<PHINode>(I.first); 8111 if (!PHI || PHI->getParent() != Header) continue; 8112 PHIsToCompute.push_back(PHI); 8113 } 8114 for (PHINode *PHI : PHIsToCompute) { 8115 Constant *&NextPHI = NextIterVals[PHI]; 8116 if (NextPHI) continue; // Already computed! 8117 8118 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 8119 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 8120 } 8121 CurrentIterVals.swap(NextIterVals); 8122 } 8123 8124 // Too many iterations were needed to evaluate. 8125 return getCouldNotCompute(); 8126 } 8127 8128 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 8129 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 8130 ValuesAtScopes[V]; 8131 // Check to see if we've folded this expression at this loop before. 8132 for (auto &LS : Values) 8133 if (LS.first == L) 8134 return LS.second ? LS.second : V; 8135 8136 Values.emplace_back(L, nullptr); 8137 8138 // Otherwise compute it. 8139 const SCEV *C = computeSCEVAtScope(V, L); 8140 for (auto &LS : reverse(ValuesAtScopes[V])) 8141 if (LS.first == L) { 8142 LS.second = C; 8143 break; 8144 } 8145 return C; 8146 } 8147 8148 /// This builds up a Constant using the ConstantExpr interface. That way, we 8149 /// will return Constants for objects which aren't represented by a 8150 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 8151 /// Returns NULL if the SCEV isn't representable as a Constant. 8152 static Constant *BuildConstantFromSCEV(const SCEV *V) { 8153 switch (static_cast<SCEVTypes>(V->getSCEVType())) { 8154 case scCouldNotCompute: 8155 case scAddRecExpr: 8156 break; 8157 case scConstant: 8158 return cast<SCEVConstant>(V)->getValue(); 8159 case scUnknown: 8160 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 8161 case scSignExtend: { 8162 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 8163 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 8164 return ConstantExpr::getSExt(CastOp, SS->getType()); 8165 break; 8166 } 8167 case scZeroExtend: { 8168 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 8169 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 8170 return ConstantExpr::getZExt(CastOp, SZ->getType()); 8171 break; 8172 } 8173 case scTruncate: { 8174 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 8175 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 8176 return ConstantExpr::getTrunc(CastOp, ST->getType()); 8177 break; 8178 } 8179 case scAddExpr: { 8180 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 8181 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 8182 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 8183 unsigned AS = PTy->getAddressSpace(); 8184 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8185 C = ConstantExpr::getBitCast(C, DestPtrTy); 8186 } 8187 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 8188 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 8189 if (!C2) return nullptr; 8190 8191 // First pointer! 
8192 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
8193 unsigned AS = C2->getType()->getPointerAddressSpace();
8194 std::swap(C, C2);
8195 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
8196 // The offsets have been converted to bytes. We can add bytes to an
8197 // i8* by GEP with the byte count in the first index.
8198 C = ConstantExpr::getBitCast(C, DestPtrTy);
8199 }
8200
8201 // Don't bother trying to sum two pointers. We probably can't
8202 // statically compute a load that results from it anyway.
8203 if (C2->getType()->isPointerTy())
8204 return nullptr;
8205
8206 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
8207 if (PTy->getElementType()->isStructTy())
8208 C2 = ConstantExpr::getIntegerCast(
8209 C2, Type::getInt32Ty(C->getContext()), true);
8210 C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
8211 } else
8212 C = ConstantExpr::getAdd(C, C2);
8213 }
8214 return C;
8215 }
8216 break;
8217 }
8218 case scMulExpr: {
8219 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
8220 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
8221 // Don't bother with pointers at all.
8222 if (C->getType()->isPointerTy()) return nullptr;
8223 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
8224 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
8225 if (!C2 || C2->getType()->isPointerTy()) return nullptr;
8226 C = ConstantExpr::getMul(C, C2);
8227 }
8228 return C;
8229 }
8230 break;
8231 }
8232 case scUDivExpr: {
8233 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
8234 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
8235 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
8236 if (LHS->getType() == RHS->getType())
8237 return ConstantExpr::getUDiv(LHS, RHS);
8238 break;
8239 }
8240 case scSMaxExpr:
8241 case scUMaxExpr:
8242 case scSMinExpr:
8243 case scUMinExpr:
8244 break; // TODO: smax, umax, smin, umin.
8245 }
8246 return nullptr;
8247 }
8248
8249 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
8250 if (isa<SCEVConstant>(V)) return V;
8251
8252 // If this instruction is evolved from a constant-evolving PHI, compute the
8253 // exit value from the loop without using SCEVs.
8254 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
8255 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
8256 if (PHINode *PN = dyn_cast<PHINode>(I)) {
8257 const Loop *LI = this->LI[I->getParent()];
8258 // Looking for loop exit value.
8259 if (LI && LI->getParentLoop() == L &&
8260 PN->getParent() == LI->getHeader()) {
8261 // Okay, there is no closed form solution for the PHI node. Check
8262 // to see if the loop that contains it has a known backedge-taken
8263 // count. If so, we may be able to force computation of the exit
8264 // value.
8265 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
8266 // This trivial case can show up in some degenerate cases where
8267 // the incoming IR has not yet been fully simplified.
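// Illustrative (hypothetical IR): if the backedge-taken count folds to zero,
// the body never re-executes, so a header PHI such as
// "%p = phi i32 [ %init, %preheader ], [ %next, %latch ]" can only ever hold
// %init at this scope; the code below returns getSCEV(%init) in that case.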
8268 if (BackedgeTakenCount->isZero()) { 8269 Value *InitValue = nullptr; 8270 bool MultipleInitValues = false; 8271 for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { 8272 if (!LI->contains(PN->getIncomingBlock(i))) { 8273 if (!InitValue) 8274 InitValue = PN->getIncomingValue(i); 8275 else if (InitValue != PN->getIncomingValue(i)) { 8276 MultipleInitValues = true; 8277 break; 8278 } 8279 } 8280 } 8281 if (!MultipleInitValues && InitValue) 8282 return getSCEV(InitValue); 8283 } 8284 // Do we have a loop invariant value flowing around the backedge 8285 // for a loop which must execute the backedge? 8286 if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 8287 isKnownPositive(BackedgeTakenCount) && 8288 PN->getNumIncomingValues() == 2) { 8289 8290 unsigned InLoopPred = LI->contains(PN->getIncomingBlock(0)) ? 0 : 1; 8291 Value *BackedgeVal = PN->getIncomingValue(InLoopPred); 8292 if (LI->isLoopInvariant(BackedgeVal)) 8293 return getSCEV(BackedgeVal); 8294 } 8295 if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 8296 // Okay, we know how many times the containing loop executes. If 8297 // this is a constant evolving PHI node, get the final value at 8298 // the specified iteration number. 8299 Constant *RV = 8300 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI); 8301 if (RV) return getSCEV(RV); 8302 } 8303 } 8304 8305 // If there is a single-input Phi, evaluate it at our scope. If we can 8306 // prove that this replacement does not break LCSSA form, use new value. 8307 if (PN->getNumOperands() == 1) { 8308 const SCEV *Input = getSCEV(PN->getOperand(0)); 8309 const SCEV *InputAtScope = getSCEVAtScope(Input, L); 8310 // TODO: We can generalize it using LI.replacementPreservesLCSSAForm, 8311 // for the simplest case just support constants. 8312 if (isa<SCEVConstant>(InputAtScope)) return InputAtScope; 8313 } 8314 } 8315 8316 // Okay, this is an expression that we cannot symbolically evaluate 8317 // into a SCEV. Check to see if it's possible to symbolically evaluate 8318 // the arguments into constants, and if so, try to constant propagate the 8319 // result. This is particularly useful for computing loop exit values. 8320 if (CanConstantFold(I)) { 8321 SmallVector<Constant *, 4> Operands; 8322 bool MadeImprovement = false; 8323 for (Value *Op : I->operands()) { 8324 if (Constant *C = dyn_cast<Constant>(Op)) { 8325 Operands.push_back(C); 8326 continue; 8327 } 8328 8329 // If any of the operands is non-constant and if they are 8330 // non-integer and non-pointer, don't even try to analyze them 8331 // with scev techniques. 8332 if (!isSCEVable(Op->getType())) 8333 return V; 8334 8335 const SCEV *OrigV = getSCEV(Op); 8336 const SCEV *OpV = getSCEVAtScope(OrigV, L); 8337 MadeImprovement |= OrigV != OpV; 8338 8339 Constant *C = BuildConstantFromSCEV(OpV); 8340 if (!C) return V; 8341 if (C->getType() != Op->getType()) 8342 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 8343 Op->getType(), 8344 false), 8345 C, Op->getType()); 8346 Operands.push_back(C); 8347 } 8348 8349 // Check to see if getSCEVAtScope actually made an improvement. 
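// For instance (illustrative, hypothetical IR): an out-of-loop use of
// "%r = add i32 %iv, 7" where %iv's exit value folds to the constant 42 lets
// the code below fold %r to 49 instead of leaving it as an opaque
// SCEVUnknown.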
8350 if (MadeImprovement) { 8351 Constant *C = nullptr; 8352 const DataLayout &DL = getDataLayout(); 8353 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 8354 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 8355 Operands[1], DL, &TLI); 8356 else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) { 8357 if (!LI->isVolatile()) 8358 C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 8359 } else 8360 C = ConstantFoldInstOperands(I, Operands, DL, &TLI); 8361 if (!C) return V; 8362 return getSCEV(C); 8363 } 8364 } 8365 } 8366 8367 // This is some other type of SCEVUnknown, just return it. 8368 return V; 8369 } 8370 8371 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 8372 // Avoid performing the look-up in the common case where the specified 8373 // expression has no loop-variant portions. 8374 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 8375 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 8376 if (OpAtScope != Comm->getOperand(i)) { 8377 // Okay, at least one of these operands is loop variant but might be 8378 // foldable. Build a new instance of the folded commutative expression. 8379 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 8380 Comm->op_begin()+i); 8381 NewOps.push_back(OpAtScope); 8382 8383 for (++i; i != e; ++i) { 8384 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 8385 NewOps.push_back(OpAtScope); 8386 } 8387 if (isa<SCEVAddExpr>(Comm)) 8388 return getAddExpr(NewOps, Comm->getNoWrapFlags()); 8389 if (isa<SCEVMulExpr>(Comm)) 8390 return getMulExpr(NewOps, Comm->getNoWrapFlags()); 8391 if (isa<SCEVMinMaxExpr>(Comm)) 8392 return getMinMaxExpr(Comm->getSCEVType(), NewOps); 8393 llvm_unreachable("Unknown commutative SCEV type!"); 8394 } 8395 } 8396 // If we got here, all operands are loop invariant. 8397 return Comm; 8398 } 8399 8400 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 8401 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 8402 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 8403 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 8404 return Div; // must be loop invariant 8405 return getUDivExpr(LHS, RHS); 8406 } 8407 8408 // If this is a loop recurrence for a loop that does not contain L, then we 8409 // are dealing with the final value computed by the loop. 8410 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 8411 // First, attempt to evaluate each operand. 8412 // Avoid performing the look-up in the common case where the specified 8413 // expression has no loop-variant portions. 8414 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 8415 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 8416 if (OpAtScope == AddRec->getOperand(i)) 8417 continue; 8418 8419 // Okay, at least one of these operands is loop variant but might be 8420 // foldable. Build a new instance of the folded commutative expression. 8421 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 8422 AddRec->op_begin()+i); 8423 NewOps.push_back(OpAtScope); 8424 for (++i; i != e; ++i) 8425 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 8426 8427 const SCEV *FoldedRec = 8428 getAddRecExpr(NewOps, AddRec->getLoop(), 8429 AddRec->getNoWrapFlags(SCEV::FlagNW)); 8430 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 8431 // The addrec may be folded to a nonrecurrence, for example, if the 8432 // induction variable is multiplied by zero after constant folding. Go 8433 // ahead and return the folded value. 
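// Illustrative (hypothetical values): {0,+,%n}<L2> where %n folds to 0 at
// this scope becomes the constant 0, which is no longer a SCEVAddRecExpr;
// hence the check below.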
8434 if (!AddRec) 8435 return FoldedRec; 8436 break; 8437 } 8438 8439 // If the scope is outside the addrec's loop, evaluate it by using the 8440 // loop exit value of the addrec. 8441 if (!AddRec->getLoop()->contains(L)) { 8442 // To evaluate this recurrence, we need to know how many times the AddRec 8443 // loop iterates. Compute this now. 8444 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 8445 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 8446 8447 // Then, evaluate the AddRec. 8448 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 8449 } 8450 8451 return AddRec; 8452 } 8453 8454 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 8455 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8456 if (Op == Cast->getOperand()) 8457 return Cast; // must be loop invariant 8458 return getZeroExtendExpr(Op, Cast->getType()); 8459 } 8460 8461 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 8462 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8463 if (Op == Cast->getOperand()) 8464 return Cast; // must be loop invariant 8465 return getSignExtendExpr(Op, Cast->getType()); 8466 } 8467 8468 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 8469 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8470 if (Op == Cast->getOperand()) 8471 return Cast; // must be loop invariant 8472 return getTruncateExpr(Op, Cast->getType()); 8473 } 8474 8475 llvm_unreachable("Unknown SCEV type!"); 8476 } 8477 8478 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 8479 return getSCEVAtScope(getSCEV(V), L); 8480 } 8481 8482 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const { 8483 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) 8484 return stripInjectiveFunctions(ZExt->getOperand()); 8485 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) 8486 return stripInjectiveFunctions(SExt->getOperand()); 8487 return S; 8488 } 8489 8490 /// Finds the minimum unsigned root of the following equation: 8491 /// 8492 /// A * X = B (mod N) 8493 /// 8494 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 8495 /// A and B isn't important. 8496 /// 8497 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 8498 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 8499 ScalarEvolution &SE) { 8500 uint32_t BW = A.getBitWidth(); 8501 assert(BW == SE.getTypeSizeInBits(B->getType())); 8502 assert(A != 0 && "A must be non-zero."); 8503 8504 // 1. D = gcd(A, N) 8505 // 8506 // The gcd of A and N may have only one prime factor: 2. The number of 8507 // trailing zeros in A is its multiplicity 8508 uint32_t Mult2 = A.countTrailingZeros(); 8509 // D = 2^Mult2 8510 8511 // 2. Check if B is divisible by D. 8512 // 8513 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 8514 // is not less than multiplicity of this prime factor for D. 8515 if (SE.GetMinTrailingZeros(B) < Mult2) 8516 return SE.getCouldNotCompute(); 8517 8518 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 8519 // modulo (N / D). 8520 // 8521 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent 8522 // (N / D) in general. The inverse itself always fits into BW bits, though, 8523 // so we immediately truncate it. 
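// Worked example (illustrative values, not from the original source): with
// A = 6, B = 4, and BW = 8, we have N = 256, Mult2 = 1, and D = 2; B has two
// trailing zeros, so it is divisible by D. Then AD = 3, Mod = 128, and
// I = 43, since 3 * 43 == 1 (mod 128). The minimum root is
// (43 * 4 mod 256) / 2 = 86, and indeed 6 * 86 == 516 == 4 (mod 256).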
8524 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 8525 APInt Mod(BW + 1, 0); 8526 Mod.setBit(BW - Mult2); // Mod = N / D 8527 APInt I = AD.multiplicativeInverse(Mod).trunc(BW); 8528 8529 // 4. Compute the minimum unsigned root of the equation: 8530 // I * (B / D) mod (N / D) 8531 // To simplify the computation, we factor out the divide by D: 8532 // (I * B mod N) / D 8533 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2)); 8534 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D); 8535 } 8536 8537 /// For a given quadratic addrec, generate coefficients of the corresponding 8538 /// quadratic equation, multiplied by a common value to ensure that they are 8539 /// integers. 8540 /// The returned value is a tuple { A, B, C, M, BitWidth }, where 8541 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C 8542 /// were multiplied by, and BitWidth is the bit width of the original addrec 8543 /// coefficients. 8544 /// This function returns None if the addrec coefficients are not compile- 8545 /// time constants. 8546 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>> 8547 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) { 8548 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 8549 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 8550 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 8551 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 8552 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: " 8553 << *AddRec << '\n'); 8554 8555 // We currently can only solve this if the coefficients are constants. 8556 if (!LC || !MC || !NC) { 8557 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n"); 8558 return None; 8559 } 8560 8561 APInt L = LC->getAPInt(); 8562 APInt M = MC->getAPInt(); 8563 APInt N = NC->getAPInt(); 8564 assert(!N.isNullValue() && "This is not a quadratic addrec"); 8565 8566 unsigned BitWidth = LC->getAPInt().getBitWidth(); 8567 unsigned NewWidth = BitWidth + 1; 8568 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: " 8569 << BitWidth << '\n'); 8570 // The sign-extension (as opposed to a zero-extension) here matches the 8571 // extension used in SolveQuadraticEquationWrap (with the same motivation). 8572 N = N.sext(NewWidth); 8573 M = M.sext(NewWidth); 8574 L = L.sext(NewWidth); 8575 8576 // The increments are M, M+N, M+2N, ..., so the accumulated values are 8577 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, 8578 // L+M, L+2M+N, L+3M+3N, ... 8579 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. 8580 // 8581 // The equation Acc = 0 is then 8582 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. 8583 // In a quadratic form it becomes: 8584 // N n^2 + (2M-N) n + 2L = 0. 8585 8586 APInt A = N; 8587 APInt B = 2 * M - A; 8588 APInt C = 2 * L; 8589 APInt T = APInt(NewWidth, 2); 8590 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B 8591 << "x + " << C << ", coeff bw: " << NewWidth 8592 << ", multiplied by " << T << '\n'); 8593 return std::make_tuple(A, B, C, T, BitWidth); 8594 } 8595 8596 /// Helper function to compare optional APInts: 8597 /// (a) if X and Y both exist, return min(X, Y), 8598 /// (b) if neither X nor Y exist, return None, 8599 /// (c) if exactly one of X and Y exists, return that value. 
8600 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) { 8601 if (X.hasValue() && Y.hasValue()) { 8602 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth()); 8603 APInt XW = X->sextOrSelf(W); 8604 APInt YW = Y->sextOrSelf(W); 8605 return XW.slt(YW) ? *X : *Y; 8606 } 8607 if (!X.hasValue() && !Y.hasValue()) 8608 return None; 8609 return X.hasValue() ? *X : *Y; 8610 } 8611 8612 /// Helper function to truncate an optional APInt to a given BitWidth. 8613 /// When solving addrec-related equations, it is preferable to return a value 8614 /// that has the same bit width as the original addrec's coefficients. If the 8615 /// solution fits in the original bit width, truncate it (except for i1). 8616 /// Returning a value of a different bit width may inhibit some optimizations. 8617 /// 8618 /// In general, a solution to a quadratic equation generated from an addrec 8619 /// may require BW+1 bits, where BW is the bit width of the addrec's 8620 /// coefficients. The reason is that the coefficients of the quadratic 8621 /// equation are BW+1 bits wide (to avoid truncation when converting from 8622 /// the addrec to the equation). 8623 static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) { 8624 if (!X.hasValue()) 8625 return None; 8626 unsigned W = X->getBitWidth(); 8627 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth)) 8628 return X->trunc(BitWidth); 8629 return X; 8630 } 8631 8632 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n 8633 /// iterations. The values L, M, N are assumed to be signed, and they 8634 /// should all have the same bit widths. 8635 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW, 8636 /// where BW is the bit width of the addrec's coefficients. 8637 /// If the calculated value is a BW-bit integer (for BW > 1), it will be 8638 /// returned as such, otherwise the bit width of the returned value may 8639 /// be greater than BW. 8640 /// 8641 /// This function returns None if 8642 /// (a) the addrec coefficients are not constant, or 8643 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases 8644 /// like x^2 = 5, no integer solutions exist, in other cases an integer 8645 /// solution may exist, but SolveQuadraticEquationWrap may fail to find it. 8646 static Optional<APInt> 8647 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 8648 APInt A, B, C, M; 8649 unsigned BitWidth; 8650 auto T = GetQuadraticEquation(AddRec); 8651 if (!T.hasValue()) 8652 return None; 8653 8654 std::tie(A, B, C, M, BitWidth) = *T; 8655 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n"); 8656 Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1); 8657 if (!X.hasValue()) 8658 return None; 8659 8660 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X); 8661 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE); 8662 if (!V->isZero()) 8663 return None; 8664 8665 return TruncIfPossible(X, BitWidth); 8666 } 8667 8668 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n 8669 /// iterations. The values M, N are assumed to be signed, and they 8670 /// should all have the same bit widths. 8671 /// Find the least n such that c(n) does not belong to the given range, 8672 /// while c(n-1) does. 
8673 /// 8674 /// This function returns None if 8675 /// (a) the addrec coefficients are not constant, or 8676 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the 8677 /// bounds of the range. 8678 static Optional<APInt> 8679 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, 8680 const ConstantRange &Range, ScalarEvolution &SE) { 8681 assert(AddRec->getOperand(0)->isZero() && 8682 "Starting value of addrec should be 0"); 8683 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range " 8684 << Range << ", addrec " << *AddRec << '\n'); 8685 // This case is handled in getNumIterationsInRange. Here we can assume that 8686 // we start in the range. 8687 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) && 8688 "Addrec's initial value should be in range"); 8689 8690 APInt A, B, C, M; 8691 unsigned BitWidth; 8692 auto T = GetQuadraticEquation(AddRec); 8693 if (!T.hasValue()) 8694 return None; 8695 8696 // Be careful about the return value: there can be two reasons for not 8697 // returning an actual number. First, if no solutions to the equations 8698 // were found, and second, if the solutions don't leave the given range. 8699 // The first case means that the actual solution is "unknown", the second 8700 // means that it's known, but not valid. If the solution is unknown, we 8701 // cannot make any conclusions. 8702 // Return a pair: the optional solution and a flag indicating if the 8703 // solution was found. 8704 auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> { 8705 // Solve for signed overflow and unsigned overflow, pick the lower 8706 // solution. 8707 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary " 8708 << Bound << " (before multiplying by " << M << ")\n"); 8709 Bound *= M; // The quadratic equation multiplier. 8710 8711 Optional<APInt> SO = None; 8712 if (BitWidth > 1) { 8713 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 8714 "signed overflow\n"); 8715 SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth); 8716 } 8717 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 8718 "unsigned overflow\n"); 8719 Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, 8720 BitWidth+1); 8721 8722 auto LeavesRange = [&] (const APInt &X) { 8723 ConstantInt *C0 = ConstantInt::get(SE.getContext(), X); 8724 ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE); 8725 if (Range.contains(V0->getValue())) 8726 return false; 8727 // X should be at least 1, so X-1 is non-negative. 8728 ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1); 8729 ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE); 8730 if (Range.contains(V1->getValue())) 8731 return true; 8732 return false; 8733 }; 8734 8735 // If SolveQuadraticEquationWrap returns None, it means that there can 8736 // be a solution, but the function failed to find it. We cannot treat it 8737 // as "no solution". 8738 if (!SO.hasValue() || !UO.hasValue()) 8739 return { None, false }; 8740 8741 // Check the smaller value first to see if it leaves the range. 8742 // At this point, both SO and UO must have values. 8743 Optional<APInt> Min = MinOptional(SO, UO); 8744 if (LeavesRange(*Min)) 8745 return { Min, true }; 8746 Optional<APInt> Max = Min == SO ? UO : SO; 8747 if (LeavesRange(*Max)) 8748 return { Max, true }; 8749 8750 // Solutions were found, but were eliminated, hence the "true". 
8751 return { None, true };
8752 };
8753
8754 std::tie(A, B, C, M, BitWidth) = *T;
8755 // Lower bound is inclusive, subtract 1 to represent the exiting value.
8756 APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
8757 APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
8758 auto SL = SolveForBoundary(Lower);
8759 auto SU = SolveForBoundary(Upper);
8760 // If any of the solutions was unknown, no meaningful conclusions can
8761 // be made.
8762 if (!SL.second || !SU.second)
8763 return None;
8764
8765 // Claim: The correct solution is not some value between Min and Max.
8766 //
8767 // Justification: Assuming that Min and Max are different values, one of
8768 // them is when the first signed overflow happens, the other is when the
8769 // first unsigned overflow happens. Crossing the range boundary is only
8770 // possible via an overflow (treating 0 as a special case of it, modeling
8771 // an overflow as crossing k*2^W for some k).
8772 //
8773 // The interesting case here is when Min was eliminated as an invalid
8774 // solution, but Max was not. The argument is that if there was another
8775 // overflow between Min and Max, it would also have been eliminated if
8776 // it was considered.
8777 //
8778 // For a given boundary, it is possible to have two overflows of the same
8779 // type (signed/unsigned) without having the other type in between: this
8780 // can happen when the vertex of the parabola is between the iterations
8781 // corresponding to the overflows. This is only possible when the two
8782 // overflows cross k*2^W for the same k. In such case, if the second one
8783 // left the range (and was the first one to do so), the first overflow
8784 // would have to enter the range, which would mean that either we had left
8785 // the range before or that we started outside of it. Both of these cases
8786 // are contradictions.
8787 //
8788 // Claim: In the case where SolveForBoundary returns None, the correct
8789 // solution is not some value between the Max for this boundary and the
8790 // Min of the other boundary.
8791 //
8792 // Justification: Assume that we had such Max_A and Min_B corresponding
8793 // to range boundaries A and B and such that Max_A < Min_B. If there was
8794 // a solution between Max_A and Min_B, it would have to be caused by an
8795 // overflow corresponding to either A or B. It cannot correspond to B,
8796 // since Min_B is the first occurrence of such an overflow. If it
8797 // corresponded to A, it would have to be either a signed or an unsigned
8798 // overflow that is larger than both eliminated overflows for A. But
8799 // between the eliminated overflows and this overflow, the values would
8800 // cover the entire value space, thus crossing the other boundary, which
8801 // is a contradiction.
8802
8803 return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
8804 }
8805
8806 ScalarEvolution::ExitLimit
8807 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
8808 bool AllowPredicates) {
8809
8810 // This is only used for loops with an "x != y" exit test. The exit condition
8811 // is now expressed as a single expression, V = x-y. So the exit test is
8812 // effectively V != 0. We know and take advantage of the fact that this
8813 // expression is only used in a comparison-with-zero context.
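// Illustrative example (hypothetical loop): for "for (i = 0; i != 16; i += 2)"
// the exit expression is V = {0,+,2} - 16 = {-16,+,2}, which first reaches 0
// at n = 8, so the computed count is 8.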
8814 8815 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 8816 // If the value is a constant 8817 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 8818 // If the value is already zero, the branch will execute zero times. 8819 if (C->getValue()->isZero()) return C; 8820 return getCouldNotCompute(); // Otherwise it will loop infinitely. 8821 } 8822 8823 const SCEVAddRecExpr *AddRec = 8824 dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V)); 8825 8826 if (!AddRec && AllowPredicates) 8827 // Try to make this an AddRec using runtime tests, in the first X 8828 // iterations of this loop, where X is the SCEV expression found by the 8829 // algorithm below. 8830 AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates); 8831 8832 if (!AddRec || AddRec->getLoop() != L) 8833 return getCouldNotCompute(); 8834 8835 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of 8836 // the quadratic equation to solve it. 8837 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { 8838 // We can only use this value if the chrec ends up with an exact zero 8839 // value at this index. When solving for "X*X != 5", for example, we 8840 // should not accept a root of 2. 8841 if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) { 8842 const auto *R = cast<SCEVConstant>(getConstant(S.getValue())); 8843 return ExitLimit(R, R, false, Predicates); 8844 } 8845 return getCouldNotCompute(); 8846 } 8847 8848 // Otherwise we can only handle this if it is affine. 8849 if (!AddRec->isAffine()) 8850 return getCouldNotCompute(); 8851 8852 // If this is an affine expression, the execution count of this branch is 8853 // the minimum unsigned root of the following equation: 8854 // 8855 // Start + Step*N = 0 (mod 2^BW) 8856 // 8857 // equivalent to: 8858 // 8859 // Step*N = -Start (mod 2^BW) 8860 // 8861 // where BW is the common bit width of Start and Step. 8862 8863 // Get the initial value for the loop. 8864 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 8865 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 8866 8867 // For now we handle only constant steps. 8868 // 8869 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 8870 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 8871 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. 8872 // We have not yet seen any such cases. 8873 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 8874 if (!StepC || StepC->getValue()->isZero()) 8875 return getCouldNotCompute(); 8876 8877 // For positive steps (counting up until unsigned overflow): 8878 // N = -Start/Step (as unsigned) 8879 // For negative steps (counting down to zero): 8880 // N = Start/-Step 8881 // First compute the unsigned distance from zero in the direction of Step. 8882 bool CountDown = StepC->getAPInt().isNegative(); 8883 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 8884 8885 // Handle unitary steps, which cannot wraparound. 8886 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 8887 // N = Distance (as unsigned) 8888 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { 8889 APInt MaxBECount = getUnsignedRangeMax(Distance); 8890 8891 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, 8892 // we end up with a loop whose backedge-taken count is n - 1. Detect this 8893 // case, and see if we can improve the bound. 
    //
    // Explicitly handling this here is necessary because getUnsignedRange
    // isn't context-sensitive; it doesn't know that we only care about the
    // range inside the loop.
    const SCEV *Zero = getZero(Distance->getType());
    const SCEV *One = getOne(Distance->getType());
    const SCEV *DistancePlusOne = getAddExpr(Distance, One);
    if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
      // If Distance + 1 doesn't overflow, we can compute the maximum distance
      // as "unsigned_max(Distance + 1) - 1".
      ConstantRange CR = getUnsignedRange(DistancePlusOne);
      MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
    }
    return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
  }

  // If the condition controls the loop exit (the loop exits only if the
  // expression is true) and the addition is no-wrap, we can use unsigned
  // divide to compute the backedge count. In this case, the step may not
  // divide the distance, but we don't care because if the condition is
  // "missed" the loop will have undefined behavior due to wrapping.
  if (ControlsExit && AddRec->hasNoSelfWrap() &&
      loopHasNoAbnormalExits(AddRec->getLoop())) {
    const SCEV *Exact =
        getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
    const SCEV *Max =
        Exact == getCouldNotCompute()
            ? Exact
            : getConstant(getUnsignedRangeMax(Exact));
    return ExitLimit(Exact, Max, false, Predicates);
  }

  // Solve the general equation.
  const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
                                               getNegativeSCEV(Start), *this);
  const SCEV *M = E == getCouldNotCompute()
                      ? E
                      : getConstant(getUnsignedRangeMax(E));
  return ExitLimit(E, M, false, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed. We don't
  // handle them yet except for the trivial case. This could be expanded in the
  // future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already. If so, the backedge will execute zero times.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    if (!C->getValue()->isZero())
      return getZero(C->getType());
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  // We could implement others, but I really doubt anyone writes loops like
  // this, and if they did, they would already be constant folded.
  return getCouldNotCompute();
}

std::pair<BasicBlock *, BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
  // If the block has a unique predecessor, then there is no path from the
  // predecessor to the block that does not go through the direct edge
  // from the predecessor to the block.
  if (BasicBlock *Pred = BB->getSinglePredecessor())
    return {Pred, BB};

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
  if (Loop *L = LI.getLoopFor(BB))
    return {L->getLoopPredecessor(), L->getHeader()};

  return {nullptr, nullptr};
}

/// SCEV structural equivalence is usually sufficient for testing whether two
/// expressions are equal. However, for the purposes of looking for a condition
/// guarding a loop, it can be useful to be a little more general, since a
/// front-end may have replicated the controlling expression.
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
    // Not all instructions that are "identical" compute the same value. For
    // instance, two distinct alloca instructions allocating the same type are
    // identical and do not read memory, but they compute distinct values.
    return A->isIdenticalTo(B) &&
           (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
  };

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value. Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (ComputesEqualValues(AI, BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}

bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS,
                                           unsigned Depth) {
  bool Changed = false;
  // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
  // '0 != 0'.
  auto TrivialCase = [&](bool TriviallyTrue) {
    LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
    Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
    return true;
  };
  // If we hit the max recursion limit, bail out.
  if (Depth >= 3)
    return false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (ConstantExpr::getICmp(Pred,
                                LHSC->getValue(),
                                RHSC->getValue())->isNullValue())
        return TrivialCase(false);
      else
        return TrivialCase(true);
    }
    // Otherwise swap the operands to put the constant on the right.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
    Changed = true;
  }

  // If we're comparing an addrec with a value which is loop-invariant in the
  // addrec's loop, put the addrec on the left. Also make a dominance check,
  // as both operands could be addrecs loop-invariant in each other's loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
    const Loop *L = AR->getLoop();
    if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
      Changed = true;
    }
  }

  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
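  // (For instance -- an illustrative example, not an exhaustive list -- the
  // exact-range check below turns "x u>= 1" into "x != 0", while the switch
  // further down turns "x s<= 5" into "x s< 6".)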
  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
    const APInt &RA = RC->getAPInt();

    bool SimplifiedByConstantRange = false;

    if (!ICmpInst::isEquality(Pred)) {
      ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
      if (ExactCR.isFullSet())
        return TrivialCase(true);
      else if (ExactCR.isEmptySet())
        return TrivialCase(false);

      APInt NewRHS;
      CmpInst::Predicate NewPred;
      if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
          ICmpInst::isEquality(NewPred)) {
        // We were able to convert an inequality to an equality.
        Pred = NewPred;
        RHS = getConstant(NewRHS);
        Changed = SimplifiedByConstantRange = true;
      }
    }

    if (!SimplifiedByConstantRange) {
      switch (Pred) {
      default:
        break;
      case ICmpInst::ICMP_EQ:
      case ICmpInst::ICMP_NE:
        // Fold ((-1) * %a) + %b == 0 (equivalent to %b - %a == 0) into
        // %a == %b.
        if (!RA)
          if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
            if (const SCEVMulExpr *ME =
                    dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
              if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
                  ME->getOperand(0)->isAllOnesValue()) {
                RHS = AE->getOperand(1);
                LHS = ME->getOperand(1);
                Changed = true;
              }
        break;

      // The "Should have been caught earlier!" messages refer to the fact
      // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
      // should have fired on the corresponding cases, and canonicalized the
      // check to a trivial case.

      case ICmpInst::ICMP_UGE:
        assert(!RA.isMinValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_UGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_ULE:
        assert(!RA.isMaxValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_ULT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SGE:
        assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SLE:
        assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SLT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
    }
  }

  // Check for obvious equality.
  if (HasSameValue(LHS, RHS)) {
    if (ICmpInst::isTrueWhenEqual(Pred))
      return TrivialCase(true);
    if (ICmpInst::isFalseWhenEqual(Pred))
      return TrivialCase(false);
  }

  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  // adding or subtracting 1 from one of the operands.
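  // (E.g. "x s<= y" becomes "x s< y + 1" when y + 1 provably cannot
  // overflow; failing that, "x - 1 s< y" is tried on the other operand. The
  // examples in this comment are illustrative only.)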
  switch (Pred) {
  case ICmpInst::ICMP_SLE:
    if (!getSignedRangeMax(RHS).isMaxSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_SGE:
    if (!getSignedRangeMin(RHS).isMinSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_ULE:
    if (!getUnsignedRangeMax(RHS).isMaxValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_UGE:
    if (!getUnsignedRangeMin(RHS).isMinValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    }
    break;
  default:
    break;
  }

  // TODO: More simplifications are possible here.

  // Recursively simplify until we either hit a recursion limit or nothing
  // changes.
  if (Changed)
    return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);

  return Changed;
}

bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  return getSignedRangeMax(S).isNegative();
}

bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  return getSignedRangeMin(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  return !getSignedRangeMin(S).isNegative();
}

bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  return !getSignedRangeMax(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  return isKnownNegative(S) || isKnownPositive(S);
}

std::pair<const SCEV *, const SCEV *>
ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) {
  // Compute the SCEV on entry of loop L.
  const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
  if (Start == getCouldNotCompute())
    return { Start, Start };
  // Compute the post increment SCEV for loop L.
  const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
  assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
  return { Start, PostInc };
}

bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // First collect all loops.
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(LHS, LoopsUsed);
  getUsedLoops(RHS, LoopsUsed);

  if (LoopsUsed.empty())
    return false;

  // The domination relationship must be a linear order on the collected loops.
#ifndef NDEBUG
  for (auto *L1 : LoopsUsed)
    for (auto *L2 : LoopsUsed)
      assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
              DT.dominates(L2->getHeader(), L1->getHeader())) &&
             "Domination relationship is not a linear order");
#endif

  const Loop *MDL =
      *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
                        [&](const Loop *L1, const Loop *L2) {
        return DT.properlyDominates(L1->getHeader(), L2->getHeader());
      });

  // Get the init and post increment value for LHS.
  auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
  // If LHS contains an unknown non-invariant SCEV then bail out.
  if (SplitLHS.first == getCouldNotCompute())
    return false;
  assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
  // Get the init and post increment value for RHS.
  auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
  // If RHS contains an unknown non-invariant SCEV then bail out.
  if (SplitRHS.first == getCouldNotCompute())
    return false;
  assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
  // It is possible that the init SCEV contains an invariant load but it does
  // not dominate MDL and is not available at MDL loop entry, so we should
  // check it here.
  if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
      !isAvailableAtLoopEntry(SplitRHS.first, MDL))
    return false;

  // It seems the backedge guard check is faster than the entry one, so in
  // some cases it can speed up the whole estimation by short-circuiting.
  return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
                                     SplitRHS.second) &&
         isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
}

bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {
  // Canonicalize the inputs first.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  if (isKnownViaInduction(Pred, LHS, RHS))
    return true;

  if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
    return true;

  // Otherwise see what can be done with some simple reasoning.
  return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
}

bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
                                              const SCEVAddRecExpr *LHS,
                                              const SCEV *RHS) {
  const Loop *L = LHS->getLoop();
  return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
         isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
}

bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS,
                                           ICmpInst::Predicate Pred,
                                           bool &Increasing) {
  bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing);

#ifndef NDEBUG
  // Verify an invariant: inverting the predicate should turn a monotonically
  // increasing change to a monotonically decreasing one, and vice versa.
  bool IncreasingSwapped;
  bool ResultSwapped = isMonotonicPredicateImpl(
      LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped);

  assert(Result == ResultSwapped && "should be able to analyze both!");
  if (ResultSwapped)
    assert(Increasing == !IncreasingSwapped &&
           "monotonicity should flip as we flip the predicate");
#endif

  return Result;
}

bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS,
                                               ICmpInst::Predicate Pred,
                                               bool &Increasing) {

  // A zero step value for LHS means the induction variable is essentially a
  // loop invariant value. We don't really depend on the predicate actually
  // flipping from false to true (for increasing predicates, and the other way
  // around for decreasing predicates); all we care about is that *if* the
  // predicate changes then it only changes from false to true.
  //
  // A zero step value in itself is not very useful, but there may be places
  // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
  // as general as possible.

  switch (Pred) {
  default:
    return false; // Conservative answer

  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (!LHS->hasNoUnsignedWrap())
      return false;

    Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE;
    return true;

  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE: {
    if (!LHS->hasNoSignedWrap())
      return false;

    const SCEV *Step = LHS->getStepRecurrence(*this);

    if (isKnownNonNegative(Step)) {
      Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE;
      return true;
    }

    if (isKnownNonPositive(Step)) {
      Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE;
      return true;
    }

    return false;
  }
  }

  llvm_unreachable("switch has default clause!");
}

bool ScalarEvolution::isLoopInvariantPredicate(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
    ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS,
    const SCEV *&InvariantRHS) {

  // If there is a loop-invariant operand, force it into the RHS, otherwise
  // bail out.
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return false;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!ArLHS || ArLHS->getLoop() != L)
    return false;

  bool Increasing;
  if (!isMonotonicPredicate(ArLHS, Pred, Increasing))
    return false;

  // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
  // true as the loop iterates, and the backedge is control dependent on
  // "ArLHS `Pred` RHS" == true then we can reason as follows:
  //
  //   * if the predicate was false in the first iteration then the predicate
  //     is never evaluated again, since the loop exits without taking the
  //     backedge.
  //   * if the predicate was true in the first iteration then it will
  //     continue to be true for all future iterations since it is
  //     monotonically increasing.
  //
  // For both of the above possibilities, we can replace the loop varying
  // predicate with its value on the first iteration of the loop (which is
  // loop invariant).
  //
  // A similar reasoning applies for a monotonically decreasing predicate, by
  // replacing true with false and false with true in the above two bullets.

  auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);

  if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
    return false;

  InvariantPred = Pred;
  InvariantLHS = ArLHS->getStart();
  InvariantRHS = RHS;
  return true;
}

bool ScalarEvolution::isKnownPredicateViaConstantRanges(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
  if (HasSameValue(LHS, RHS))
    return ICmpInst::isTrueWhenEqual(Pred);

  // This code is split out from isKnownPredicate because it is called from
  // within isLoopEntryGuardedByCond.

  auto CheckRanges =
      [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) {
        return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS)
            .contains(RangeLHS);
      };

  // The check at the top of the function catches the case where the values
  // are known to be equal.
  if (Pred == CmpInst::ICMP_EQ)
    return false;

  if (Pred == CmpInst::ICMP_NE)
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
           CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
           isKnownNonZero(getMinusSCEV(LHS, RHS));

  if (CmpInst::isSigned(Pred))
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));

  return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
}

bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
                                                    const SCEV *LHS,
                                                    const SCEV *RHS) {
  // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
  // Return Y via OutY.
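  // (As an illustration: with Result = (%x + 42)<nsw> and X = %x, the match
  // below sets OutY to 42 and succeeds provided nsw is among ExpectedFlags.)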
  auto MatchBinaryAddToConst =
      [this](const SCEV *Result, const SCEV *X, APInt &OutY,
             SCEV::NoWrapFlags ExpectedFlags) {
        const SCEV *NonConstOp, *ConstOp;
        SCEV::NoWrapFlags FlagsPresent;

        if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
            !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
          return false;

        OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
        return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
      };

  APInt C;

  switch (Pred) {
  default:
    break;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    // X s<= (X + C)<nsw> if C >= 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
      return true;

    // (X + C)<nsw> s<= X if C <= 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
        !C.isStrictlyPositive())
      return true;
    break;

  case ICmpInst::ICMP_SGT:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLT:
    // X s< (X + C)<nsw> if C > 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
        C.isStrictlyPositive())
      return true;

    // (X + C)<nsw> s< X if C < 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
      return true;
    break;
  }

  return false;
}

bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
                                                   const SCEV *LHS,
                                                   const SCEV *RHS) {
  if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
    return false;

  // Allowing an arbitrary number of activations of
  // isKnownPredicateViaSplitting on the stack can result in exponential time
  // complexity.
  SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);

  // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
  //
  // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
  // isKnownPredicate. isKnownPredicate is more powerful, but also more
  // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
  // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
  // use isKnownPredicate later if needed.
  return isKnownNonNegative(RHS) &&
         isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
         isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
}

bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // No need to even try if we know the module has no guards.
  if (!HasGuards)
    return false;

  return any_of(*BB, [&](Instruction &I) {
    using namespace llvm::PatternMatch;

    Value *Condition;
    return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
                         m_Value(Condition))) &&
           isImpliedCond(Pred, LHS, RHS, Condition, false);
  });
}

/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS. This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  if (VerifyIR)
    assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
           "This cannot be done on broken IR!");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
      dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS,
                    LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the stack
  // -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times. This means the backedge condition at Latch is
    // equivalent to "{0,+,1} u< LatchBECount".
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
        getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, Latch->getTerminator()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  // If the loop is not reachable from the entry block, we risk running into an
  // infinite loop as we walk up into the dom tree. These loops do not matter
  // anyway, so we just return a conservative answer when we see them.
  if (!DT.isReachableFromEntry(L->getHeader()))
    return false;

  if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
    return true;

  for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
       DTN != HeaderDTN; DTN = DTN->getIDom()) {
    assert(DTN && "should reach the loop header before reaching the root!");

    BasicBlock *BB = DTN->getBlock();
    if (isImpliedViaGuard(BB, Pred, LHS, RHS))
      return true;

    BasicBlock *PBB = BB->getSinglePredecessor();
    if (!PBB)
      continue;

    BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
    if (!ContinuePredicate || !ContinuePredicate->isConditional())
      continue;

    Value *Condition = ContinuePredicate->getCondition();

    // If we have an edge `E` within the loop body that dominates the only
    // latch, the condition guarding `E` also guards the backedge. This
    // reasoning works only for loops with a single latch.

    BasicBlockEdge DominatingEdge(PBB, BB);
    if (DominatingEdge.isSingleEdge()) {
      // We're constructively (and conservatively) enumerating edges within the
      // loop body that dominate the latch.
      // The dominator tree had better agree with us on this:
      assert(DT.dominates(DominatingEdge, Latch) && "should be!");

      if (isImpliedCond(Pred, LHS, RHS, Condition,
                        BB != ContinuePredicate->getSuccessor(0)))
        return true;
    }
  }

  return false;
}

bool
ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
                                          ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return false;

  if (VerifyIR)
    assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
           "This cannot be done on broken IR!");

  // Both LHS and RHS must be available at loop entry.
  assert(isAvailableAtLoopEntry(LHS, L) &&
         "LHS is not available at Loop Entry");
  assert(isAvailableAtLoopEntry(RHS, L) &&
         "RHS is not available at Loop Entry");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  // If we cannot prove a strict comparison (e.g. a > b), maybe we can prove
  // the facts (a >= b && a != b) separately. A typical situation is when the
  // non-strict comparison is known from ranges and non-equality is known from
  // dominating predicates. If we are proving a strict comparison, we always
  // try to prove non-equality and the non-strict comparison separately.
  auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred);
  const bool ProvingStrictComparison = (Pred != NonStrictPredicate);
  bool ProvedNonStrictComparison = false;
  bool ProvedNonEquality = false;

  if (ProvingStrictComparison) {
    ProvedNonStrictComparison =
        isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS);
    ProvedNonEquality =
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS);
    if (ProvedNonStrictComparison && ProvedNonEquality)
      return true;
  }

  // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard.
  auto ProveViaGuard = [&](BasicBlock *Block) {
    if (isImpliedViaGuard(Block, Pred, LHS, RHS))
      return true;
    if (ProvingStrictComparison) {
      if (!ProvedNonStrictComparison)
        ProvedNonStrictComparison =
            isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS);
      if (!ProvedNonEquality)
        ProvedNonEquality =
            isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS);
      if (ProvedNonStrictComparison && ProvedNonEquality)
        return true;
    }
    return false;
  };

  // Try to prove (Pred, LHS, RHS) using isImpliedCond.
  auto ProveViaCond = [&](Value *Condition, bool Inverse) {
    if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse))
      return true;
    if (ProvingStrictComparison) {
      if (!ProvedNonStrictComparison)
        ProvedNonStrictComparison =
            isImpliedCond(NonStrictPredicate, LHS, RHS, Condition, Inverse);
      if (!ProvedNonEquality)
        ProvedNonEquality =
            isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS, Condition, Inverse);
      if (ProvedNonStrictComparison && ProvedNonEquality)
        return true;
    }
    return false;
  };

  // Starting at the loop predecessor, climb up the predecessor chain, as long
  // as there are predecessors that can be found that have unique successors
  // leading to the original header.
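  // (An illustrative sketch only: if the sole path to the preheader branches
  // on "x s< n", that condition is fed to ProveViaCond below, with Inverse
  // set when the header is reached along the false edge.)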
  for (std::pair<BasicBlock *, BasicBlock *>
         Pair(L->getLoopPredecessor(), L->getHeader());
       Pair.first;
       Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    if (ProveViaGuard(Pair.first))
      return true;

    BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    if (ProveViaCond(LoopEntryPredicate->getCondition(),
                     LoopEntryPredicate->getSuccessor(0) != Pair.second))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, L->getHeader()))
      continue;

    if (ProveViaCond(CI->getArgOperand(0), false))
      return true;
  }

  return false;
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
                                    const SCEV *LHS, const SCEV *RHS,
                                    Value *FoundCondValue,
                                    bool Inverse) {
  if (!PendingLoopPredicates.insert(FoundCondValue).second)
    return false;

  auto ClearOnExit =
      make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });

  // Recursively handle And and Or conditions.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    }
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // We have found a conditional branch that dominates the loop or controls
  // the loop latch. Check to see if it is the comparison we are looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS,
                                    const SCEV *FoundRHS) {
  // Balance the types.
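  // (Illustrative case: proving an i32 comparison from an i64 "found"
  // condition extends the narrower i32 pair to i64, choosing sign- or
  // zero-extension based on the signedness of the predicate involved.)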
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
             getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }

  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
                                   RHS, LHS, FoundLHS, FoundRHS);
  }

  // An unsigned comparison is the same as a signed comparison when both
  // operands are non-negative.
  if (CmpInst::isUnsigned(FoundPred) &&
      CmpInst::getSignedPredicate(FoundPred) == Pred &&
      isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check if we can make progress by sharpening ranges.
  if (FoundPred == ICmpInst::ICMP_NE &&
      (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {

    const SCEVConstant *C = nullptr;
    const SCEV *V = nullptr;

    if (isa<SCEVConstant>(FoundLHS)) {
      C = cast<SCEVConstant>(FoundLHS);
      V = FoundRHS;
    } else {
      C = cast<SCEVConstant>(FoundRHS);
      V = FoundLHS;
    }

    // The guarding predicate tells us that C != V. If the known range
    // of V is [C, t), we can sharpen the range to [C + 1, t). The
    // range we consider has to correspond to the same signedness as the
    // predicate we're interested in folding.

    APInt Min = ICmpInst::isSigned(Pred) ?
        getSignedRangeMin(V) : getUnsignedRangeMin(V);

    if (Min == C->getAPInt()) {
      // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
      // This is true even if (Min + 1) wraps around -- in case of
      // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
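      // (A sketch of the wraparound case: for an unsigned i8 range with
      // Min == 255, SharperMin wraps to 0 and "V u>= 0" holds trivially,
      // so the sharpened bound below remains sound.)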

      APInt SharperMin = Min + 1;

      switch (Pred) {
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGE:
        // We know V `Pred` SharperMin. If this implies LHS `Pred`
        // RHS, we're done.
        if (isImpliedCondOperands(Pred, LHS, RHS, V,
                                  getConstant(SharperMin)))
          return true;
        LLVM_FALLTHROUGH;

      case ICmpInst::ICMP_SGT:
      case ICmpInst::ICMP_UGT:
        // We know from the range information that (V `Pred` Min ||
        // V == Min). We know from the guarding condition that !(V
        // == Min). This gives us
        //
        //       V `Pred` Min || V == Min && !(V == Min)
        //   =>  V `Pred` Min
        //
        // If V `Pred` Min implies LHS `Pred` RHS, we're done.

        if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min)))
          return true;
        LLVM_FALLTHROUGH;

      default:
        // No change
        break;
      }
    }
  }

  // Check whether the actual condition is beyond sufficient.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
        return true;

  // Otherwise assume the worst.
  return false;
}

bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
                                     const SCEV *&L, const SCEV *&R,
                                     SCEV::NoWrapFlags &Flags) {
  const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
  if (!AE || AE->getNumOperands() != 2)
    return false;

  L = AE->getOperand(0);
  R = AE->getOperand(1);
  Flags = AE->getNoWrapFlags();
  return true;
}

Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
                                                           const SCEV *Less) {
  // We avoid subtracting expressions here because this function is usually
  // fairly deep in the call stack (i.e. is called many times).

  // X - X = 0.
  if (More == Less)
    return APInt(getTypeSizeInBits(More->getType()), 0);

  if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
    const auto *LAR = cast<SCEVAddRecExpr>(Less);
    const auto *MAR = cast<SCEVAddRecExpr>(More);

    if (LAR->getLoop() != MAR->getLoop())
      return None;

    // We look at affine expressions only; not for correctness but to keep
    // getStepRecurrence cheap.
    if (!LAR->isAffine() || !MAR->isAffine())
      return None;

    if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
      return None;

    Less = LAR->getStart();
    More = MAR->getStart();

    // fall through
  }

  if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
    const auto &M = cast<SCEVConstant>(More)->getAPInt();
    const auto &L = cast<SCEVConstant>(Less)->getAPInt();
    return M - L;
  }

  SCEV::NoWrapFlags Flags;
  const SCEV *LLess = nullptr, *RLess = nullptr;
  const SCEV *LMore = nullptr, *RMore = nullptr;
  const SCEVConstant *C1 = nullptr, *C2 = nullptr;
  // Compare (X + C1) vs X.
  if (splitBinaryAdd(Less, LLess, RLess, Flags))
    if ((C1 = dyn_cast<SCEVConstant>(LLess)))
      if (RLess == More)
        return -(C1->getAPInt());

  // Compare X vs (X + C2).
  if (splitBinaryAdd(More, LMore, RMore, Flags))
    if ((C2 = dyn_cast<SCEVConstant>(LMore)))
      if (RMore == Less)
        return C2->getAPInt();

  // Compare (X + C1) vs (X + C2).
  if (C1 && C2 && RLess == RMore)
    return C2->getAPInt() - C1->getAPInt();

  return None;
}

bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS) {
  if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
    return false;

  const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRecLHS)
    return false;

  const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
  if (!AddRecFoundLHS)
    return false;

  // We'd like to let SCEV reason about control dependencies, so we constrain
  // both the inequalities to be about add recurrences on the same loop. This
  // way we can use isLoopEntryGuardedByCond later.

  const Loop *L = AddRecFoundLHS->getLoop();
  if (L != AddRecLHS->getLoop())
    return false;

  // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1)
  //
  // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C)
  //                                                                  ... (2)
  //
  // Informal proof for (2), assuming (1) [*]:
  //
  // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
  //
  // Then
  //
  //   FoundLHS s< FoundRHS s< INT_MIN - C
  //   <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C   [ using (3) ]
  //   <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
  //   <=> (FoundLHS + INT_MIN + C + INT_MIN) s<
  //       (FoundRHS + INT_MIN + C + INT_MIN)                   [ using (3) ]
  //   <=> FoundLHS + C s< FoundRHS + C
  //
  // [*]: (1) can be proved by ruling out overflow.
  //
  // [**]: This can be proved by analyzing all the four possibilities:
  //    (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
  //    (A s>= 0, B s>= 0).
  //
  // Note:
  // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
  // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS
  // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS
  // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is
  // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
  // C)".

  Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
  Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
  if (!LDiff || !RDiff || *LDiff != *RDiff)
    return false;

  if (LDiff->isMinValue())
    return true;

  APInt FoundRHSLimit;

  if (Pred == CmpInst::ICMP_ULT) {
    FoundRHSLimit = -(*RDiff);
  } else {
    assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
    FoundRHSLimit =
        APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
  }

  // Try to prove (1) or (2), as needed.
  return isAvailableAtLoopEntry(FoundRHS, L) &&
         isLoopEntryGuardedByCond(L, Pred, FoundRHS,
                                  getConstant(FoundRHSLimit));
}

bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS,
                                        const SCEV *FoundLHS,
                                        const SCEV *FoundRHS, unsigned Depth) {
  const PHINode *LPhi = nullptr, *RPhi = nullptr;

  auto ClearOnExit = make_scope_exit([&]() {
    if (LPhi) {
      bool Erased = PendingMerges.erase(LPhi);
      assert(Erased && "Failed to erase LPhi!");
      (void)Erased;
    }
    if (RPhi) {
      bool Erased = PendingMerges.erase(RPhi);
      assert(Erased && "Failed to erase RPhi!");
      (void)Erased;
    }
  });

  // Find the respective Phis and check that they are not pending.
  if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
    if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
      if (!PendingMerges.insert(Phi).second)
        return false;
      LPhi = Phi;
    }
  if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
    if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
      // If we detect a loop of Phi nodes being processed by this method, for
      // example:
      //
      //   %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
      //   %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
      //
      // we don't want to deal with a case that complex, so return the
      // conservative answer false.
      if (!PendingMerges.insert(Phi).second)
        return false;
      RPhi = Phi;
    }

  // If neither LHS nor RHS is a Phi, there is nothing to do here.
  if (!LPhi && !RPhi)
    return false;

  // If there is a SCEVUnknown Phi we are interested in, make it left.
  if (!LPhi) {
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
    std::swap(LPhi, RPhi);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
  const BasicBlock *LBB = LPhi->getParent();
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);

  auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
           isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
           isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
  };

  if (RPhi && RPhi->getParent() == LBB) {
    // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
    // If we compare two Phis from the same block, and for each entry block
    // the predicate is true for incoming values from this block, then the
    // predicate is also true for the Phis.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, R))
        return false;
    }
  } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
    // Case two: RHS is also a Phi from the same basic block, and it is an
    // AddRec. It means that there is a loop which has both an AddRec and
    // Unknown PHIs; for it we can compare the incoming values of the AddRec
    // from above the loop and from the latch with their respective incoming
    // values of LPhi.
    // TODO: Generalize to handle loops with many inputs in a header.
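    // (Sketch: for "%p = phi i32 [ %init, %preheader ], [ %next, %latch ]"
    // compared against {Start,+,Step}, it suffices to compare %init with
    // Start and %next with the post-increment {Start + Step,+,Step}.)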
    if (LPhi->getNumIncomingValues() != 2) return false;

    auto *RLoop = RAR->getLoop();
    auto *Predecessor = RLoop->getLoopPredecessor();
    assert(Predecessor && "Loop with AddRec with no predecessor?");
    const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
    if (!ProvedEasily(L1, RAR->getStart()))
      return false;
    auto *Latch = RLoop->getLoopLatch();
    assert(Latch && "Loop with AddRec with no latch?");
    const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
    if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
      return false;
  } else {
    // In all other cases go over the inputs of LHS and compare each of them
    // to RHS; the predicate is true for (LHS, RHS) if it is true for all such
    // pairs. At this point RHS is either a non-Phi, or it is a Phi from some
    // block different from LBB.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      // Check that RHS is available in this block.
      if (!dominates(RHS, IncBB))
        return false;
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, RHS))
        return false;
    }
  }
  return true;
}

bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS) {
  if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS) ||
         // ~x < ~y --> x > y
         isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     getNotSCEV(FoundRHS),
                                     getNotSCEV(FoundLHS));
}

/// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
template <typename MinMaxExprType>
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
                                 const SCEV *Candidate) {
  const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
  if (!MinMaxExpr)
    return false;

  return find(MinMaxExpr->operands(), Candidate) != MinMaxExpr->op_end();
}

static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
                                           ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  // If both sides are affine addrecs for the same loop, with equal
  // steps, and we know the recurrences don't wrap, then we only
  // need to check the predicate on the starting values.

  if (!ICmpInst::isRelational(Pred))
    return false;

  const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!LAR)
    return false;
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  if (!RAR)
    return false;
  if (LAR->getLoop() != RAR->getLoop())
    return false;
  if (!LAR->isAffine() || !RAR->isAffine())
    return false;

  if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
    return false;

  SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
                         SCEV::FlagNSW : SCEV::FlagNUW;
  if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
    return false;

  return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
}

/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
/// expression?
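/// For example, "smin(A, B) s<= A" and "A u<= umax(A, B)" hold by
/// construction; the cases below look for exactly these shapes.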
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}

bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
         getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
         getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting compile time with analysis of overly big trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;
  // We only want to work with the ICMP_SGT comparison so far.
  // TODO: Extend to ICMP_UGT?
  if (Pred == ICmpInst::ICMP_SLT) {
    Pred = ICmpInst::ICMP_SGT;
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }
  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigLHS = LHS;
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Checks whether the SGT predicate can be proved trivially or using the
  // found context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV. Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this. So let's decline all cases when the
    // sizes of types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getNegativeSCEV(getOne(RHS->getType()));

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rule:
    // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.

    using namespace llvm::PatternMatch;

    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derivative expressions. In the general case, creating a SCEV for it
      // may lead to a complex analysis of the entire graph, and in particular
      // it can request a trip count recalculation for the same loop; that
      // recursive query would be cached as SCEVCouldNotCompute to avoid
      // infinite recursion. To avoid this, we only want to create SCEVs that
      // are constants in this section. So we bail if Denominator is not a
      // constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
      // then a SCEV for the numerator already exists and matches with FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches with FoundLHS and the denominator
      // is positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of the types is a pointer and the other one is not. We cannot
        // extend them properly to a wider type, so let us just reject this
        // case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
      // divide it by Denominator < 4, we will have at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given that FoundLHS > -3, FoundLHS is at least -2. If we
      // divide it by Denominator > 2, then:
      // 1. If FoundLHS is negative, then the result is 0.
      // 2. If FoundLHS is non-negative, then the result is non-negative.
      // Either way, the result is non-negative.
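      // (A concrete sketch: FoundLHS = -2 and Denominator = 3 give a signed
      // quotient of 0, which is greater than any RHS < 0.)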
10448 auto *MinusOne = getNegativeSCEV(getOne(WTy)); 10449 auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt); 10450 if (isKnownNegative(RHS) && 10451 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne)) 10452 return true; 10453 } 10454 } 10455 10456 // If our expression contained SCEVUnknown Phis, and we split it down and now 10457 // need to prove something for them, try to prove the predicate for every 10458 // possible incoming values of those Phis. 10459 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1)) 10460 return true; 10461 10462 return false; 10463 } 10464 10465 static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred, 10466 const SCEV *LHS, const SCEV *RHS) { 10467 // zext x u<= sext x, sext x s<= zext x 10468 switch (Pred) { 10469 case ICmpInst::ICMP_SGE: 10470 std::swap(LHS, RHS); 10471 LLVM_FALLTHROUGH; 10472 case ICmpInst::ICMP_SLE: { 10473 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt. 10474 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS); 10475 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS); 10476 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand()) 10477 return true; 10478 break; 10479 } 10480 case ICmpInst::ICMP_UGE: 10481 std::swap(LHS, RHS); 10482 LLVM_FALLTHROUGH; 10483 case ICmpInst::ICMP_ULE: { 10484 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt. 10485 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS); 10486 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS); 10487 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand()) 10488 return true; 10489 break; 10490 } 10491 default: 10492 break; 10493 }; 10494 return false; 10495 } 10496 10497 bool 10498 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred, 10499 const SCEV *LHS, const SCEV *RHS) { 10500 return isKnownPredicateExtendIdiom(Pred, LHS, RHS) || 10501 isKnownPredicateViaConstantRanges(Pred, LHS, RHS) || 10502 IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) || 10503 IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) || 10504 isKnownPredicateViaNoOverflow(Pred, LHS, RHS); 10505 } 10506 10507 bool 10508 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, 10509 const SCEV *LHS, const SCEV *RHS, 10510 const SCEV *FoundLHS, 10511 const SCEV *FoundRHS) { 10512 switch (Pred) { 10513 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 10514 case ICmpInst::ICMP_EQ: 10515 case ICmpInst::ICMP_NE: 10516 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS)) 10517 return true; 10518 break; 10519 case ICmpInst::ICMP_SLT: 10520 case ICmpInst::ICMP_SLE: 10521 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) && 10522 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS)) 10523 return true; 10524 break; 10525 case ICmpInst::ICMP_SGT: 10526 case ICmpInst::ICMP_SGE: 10527 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) && 10528 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS)) 10529 return true; 10530 break; 10531 case ICmpInst::ICMP_ULT: 10532 case ICmpInst::ICMP_ULE: 10533 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) && 10534 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS)) 10535 return true; 10536 break; 10537 case ICmpInst::ICMP_UGT: 10538 case ICmpInst::ICMP_UGE: 10539 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) && 10540 
isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
10541 return true;
10542 break;
10543 }
10544
10545 // Maybe it can be proved via operations?
10546 if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
10547 return true;
10548
10549 return false;
10550 }
10551
10552 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
10553 const SCEV *LHS,
10554 const SCEV *RHS,
10555 const SCEV *FoundLHS,
10556 const SCEV *FoundRHS) {
10557 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
10558 // The restriction on `FoundRHS` can be lifted easily -- it exists only to
10559 // reduce the compile-time impact of this optimization.
10560 return false;
10561
10562 Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
10563 if (!Addend)
10564 return false;
10565
10566 const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
10567
10568 // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
10569 // antecedent "`FoundLHS` `Pred` `FoundRHS`".
10570 ConstantRange FoundLHSRange =
10571 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);
10572
10573 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
10574 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));
10575
10576 // We can also compute the range of values for `LHS` that satisfy the
10577 // consequent, "`LHS` `Pred` `RHS`":
10578 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
10579 ConstantRange SatisfyingLHSRange =
10580 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);
10581
10582 // The antecedent implies the consequent if every value of `LHS` that
10583 // satisfies the antecedent also satisfies the consequent.
10584 return SatisfyingLHSRange.contains(LHSRange);
10585 }
10586
10587 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
10588 bool IsSigned, bool NoWrap) {
10589 assert(isKnownPositive(Stride) && "Positive stride expected!");
10590
10591 if (NoWrap) return false;
10592
10593 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
10594 const SCEV *One = getOne(Stride->getType());
10595
10596 if (IsSigned) {
10597 APInt MaxRHS = getSignedRangeMax(RHS);
10598 APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
10599 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
10600
10601 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
10602 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
10603 }
10604
10605 APInt MaxRHS = getUnsignedRangeMax(RHS);
10606 APInt MaxValue = APInt::getMaxValue(BitWidth);
10607 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));
10608
10609 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
10610 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
10611 }
10612
10613 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
10614 bool IsSigned, bool NoWrap) {
10615 if (NoWrap) return false;
10616
10617 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
10618 const SCEV *One = getOne(Stride->getType());
10619
10620 if (IsSigned) {
10621 APInt MinRHS = getSignedRangeMin(RHS);
10622 APInt MinValue = APInt::getSignedMinValue(BitWidth);
10623 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
10624
10625 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
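// (The return below is the algebraically rearranged form
// SMinValue + SMaxStrideMinusOne > SMinRHS; with a non-negative
// MaxStrideMinusOne this side of the comparison cannot wrap.)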
10626 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 10627 } 10628 10629 APInt MinRHS = getUnsignedRangeMin(RHS); 10630 APInt MinValue = APInt::getMinValue(BitWidth); 10631 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 10632 10633 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 10634 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 10635 } 10636 10637 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 10638 bool Equality) { 10639 const SCEV *One = getOne(Step->getType()); 10640 Delta = Equality ? getAddExpr(Delta, Step) 10641 : getAddExpr(Delta, getMinusSCEV(Step, One)); 10642 return getUDivExpr(Delta, Step); 10643 } 10644 10645 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 10646 const SCEV *Stride, 10647 const SCEV *End, 10648 unsigned BitWidth, 10649 bool IsSigned) { 10650 10651 assert(!isKnownNonPositive(Stride) && 10652 "Stride is expected strictly positive!"); 10653 // Calculate the maximum backedge count based on the range of values 10654 // permitted by Start, End, and Stride. 10655 const SCEV *MaxBECount; 10656 APInt MinStart = 10657 IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start); 10658 10659 APInt StrideForMaxBECount = 10660 IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride); 10661 10662 // We already know that the stride is positive, so we paper over conservatism 10663 // in our range computation by forcing StrideForMaxBECount to be at least one. 10664 // In theory this is unnecessary, but we expect MaxBECount to be a 10665 // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV (there 10666 // is nothing to constant fold it to). 10667 APInt One(BitWidth, 1, IsSigned); 10668 StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount); 10669 10670 APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth) 10671 : APInt::getMaxValue(BitWidth); 10672 APInt Limit = MaxValue - (StrideForMaxBECount - 1); 10673 10674 // Although End can be a MAX expression we estimate MaxEnd considering only 10675 // the case End = RHS of the loop termination condition. This is safe because 10676 // in the other case (End - Start) is zero, leading to a zero maximum backedge 10677 // taken count. 10678 APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit) 10679 : APIntOps::umin(getUnsignedRangeMax(End), Limit); 10680 10681 MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */, 10682 getConstant(StrideForMaxBECount) /* Step */, 10683 false /* Equality */); 10684 10685 return MaxBECount; 10686 } 10687 10688 ScalarEvolution::ExitLimit 10689 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS, 10690 const Loop *L, bool IsSigned, 10691 bool ControlsExit, bool AllowPredicates) { 10692 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 10693 10694 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 10695 bool PredicatedIV = false; 10696 10697 if (!IV && AllowPredicates) { 10698 // Try to make this an AddRec using runtime tests, in the first X 10699 // iterations of this loop, where X is the SCEV expression found by the 10700 // algorithm below. 10701 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 10702 PredicatedIV = true; 10703 } 10704 10705 // Avoid weird loops 10706 if (!IV || IV->getLoop() != L || !IV->isAffine()) 10707 return getCouldNotCompute(); 10708 10709 bool NoWrap = ControlsExit && 10710 IV->getNoWrapFlags(IsSigned ? 
SCEV::FlagNSW : SCEV::FlagNUW);
10711
10712 const SCEV *Stride = IV->getStepRecurrence(*this);
10713
10714 bool PositiveStride = isKnownPositive(Stride);
10715
10716 // Avoid negative or zero stride values.
10717 if (!PositiveStride) {
10718 // We can compute the correct backedge taken count for loops with unknown
10719 // strides if we can prove that the loop is not an infinite loop with side
10720 // effects. Here's the loop structure we are trying to handle -
10721 //
10722 // i = start
10723 // do {
10724 // A[i] = i;
10725 // i += s;
10726 // } while (i < end);
10727 //
10728 // The backedge taken count for such loops is evaluated as -
10729 // (max(end, start + stride) - start - 1) /u stride
10730 //
10731 // The additional preconditions that we need to check to prove correctness
10732 // of the above formula are as follows -
10733 //
10734 // a) IV is either nuw or nsw depending upon signedness (indicated by the
10735 // NoWrap flag).
10736 // b) the loop has a single exit and no side effects.
10737 //
10738 //
10739 // Precondition a) implies that if the stride is negative, this is a single
10740 // trip loop. The backedge taken count formula reduces to zero in this case.
10741 //
10742 // Precondition b) implies that the unknown stride cannot be zero; otherwise
10743 // we would have UB.
10744 //
10745 // The positive stride case is the same as isKnownPositive(Stride) returning
10746 // true (original behavior of the function).
10747 //
10748 // We want to make sure that the stride is truly unknown, as there are edge
10749 // cases where ScalarEvolution propagates no wrap flags to the
10750 // post-increment/decrement IV even though the increment/decrement operation
10751 // itself is wrapping. The computed backedge taken count may be wrong in
10752 // such cases. This is prevented by checking that the stride is not known to
10753 // be either positive or non-positive. For example, no wrap flags are
10754 // propagated to the post-increment IV of this loop with a trip count of 2 -
10755 //
10756 // unsigned char i;
10757 // for(i=127; i<128; i+=129)
10758 // A[i] = i;
10759 //
10760 if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
10761 !loopHasNoSideEffects(L))
10762 return getCouldNotCompute();
10763 } else if (!Stride->isOne() &&
10764 doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
10765 // Avoid proven overflow cases: this will ensure that the backedge taken
10766 // count will not generate any unsigned overflow. Relaxed no-overflow
10767 // conditions exploit NoWrapFlags, which allows optimizing in the presence
10768 // of undefined behavior, as in the C language.
10769 return getCouldNotCompute();
10770
10771 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
10772 : ICmpInst::ICMP_ULT;
10773 const SCEV *Start = IV->getStart();
10774 const SCEV *End = RHS;
10775 // When the RHS is not invariant, we do not know the end bound of the loop and
10776 // cannot calculate the ExactBECount needed by ExitLimit. However, we can
10777 // calculate the MaxBECount, given the start, stride, and max value for the end
10778 // bound of the loop (RHS), and the fact that IV does not overflow (which is
10779 // checked above).
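// (Illustrative, hypothetical case: in
//   for (i = 0; i < *p; ++i) { ... code that may store to *p ... }
// the exact backedge-taken count depends on the values *p takes over time,
// but the maximum of RHS's range still yields a sound MaxBECount.)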
10780 if (!isLoopInvariant(RHS, L)) { 10781 const SCEV *MaxBECount = computeMaxBECountForLT( 10782 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 10783 return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount, 10784 false /*MaxOrZero*/, Predicates); 10785 } 10786 // If the backedge is taken at least once, then it will be taken 10787 // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start 10788 // is the LHS value of the less-than comparison the first time it is evaluated 10789 // and End is the RHS. 10790 const SCEV *BECountIfBackedgeTaken = 10791 computeBECount(getMinusSCEV(End, Start), Stride, false); 10792 // If the loop entry is guarded by the result of the backedge test of the 10793 // first loop iteration, then we know the backedge will be taken at least 10794 // once and so the backedge taken count is as above. If not then we use the 10795 // expression (max(End,Start)-Start)/Stride to describe the backedge count, 10796 // as if the backedge is taken at least once max(End,Start) is End and so the 10797 // result is as above, and if not max(End,Start) is Start so we get a backedge 10798 // count of zero. 10799 const SCEV *BECount; 10800 if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) 10801 BECount = BECountIfBackedgeTaken; 10802 else { 10803 End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); 10804 BECount = computeBECount(getMinusSCEV(End, Start), Stride, false); 10805 } 10806 10807 const SCEV *MaxBECount; 10808 bool MaxOrZero = false; 10809 if (isa<SCEVConstant>(BECount)) 10810 MaxBECount = BECount; 10811 else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) { 10812 // If we know exactly how many times the backedge will be taken if it's 10813 // taken at least once, then the backedge count will either be that or 10814 // zero. 10815 MaxBECount = BECountIfBackedgeTaken; 10816 MaxOrZero = true; 10817 } else { 10818 MaxBECount = computeMaxBECountForLT( 10819 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 10820 } 10821 10822 if (isa<SCEVCouldNotCompute>(MaxBECount) && 10823 !isa<SCEVCouldNotCompute>(BECount)) 10824 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 10825 10826 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates); 10827 } 10828 10829 ScalarEvolution::ExitLimit 10830 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, 10831 const Loop *L, bool IsSigned, 10832 bool ControlsExit, bool AllowPredicates) { 10833 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 10834 // We handle only IV > Invariant 10835 if (!isLoopInvariant(RHS, L)) 10836 return getCouldNotCompute(); 10837 10838 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 10839 if (!IV && AllowPredicates) 10840 // Try to make this an AddRec using runtime tests, in the first X 10841 // iterations of this loop, where X is the SCEV expression found by the 10842 // algorithm below. 10843 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 10844 10845 // Avoid weird loops 10846 if (!IV || IV->getLoop() != L || !IV->isAffine()) 10847 return getCouldNotCompute(); 10848 10849 bool NoWrap = ControlsExit && 10850 IV->getNoWrapFlags(IsSigned ? 
SCEV::FlagNSW : SCEV::FlagNUW);
10851
10852 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));
10853
10854 // Avoid negative or zero stride values.
10855 if (!isKnownPositive(Stride))
10856 return getCouldNotCompute();
10857
10858 // Avoid proven overflow cases: this will ensure that the backedge taken count
10859 // will not generate any unsigned overflow. Relaxed no-overflow conditions
10860 // exploit NoWrapFlags, which allows optimizing in the presence of undefined
10861 // behavior, as in the C language.
10862 if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
10863 return getCouldNotCompute();
10864
10865 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
10866 : ICmpInst::ICMP_UGT;
10867
10868 const SCEV *Start = IV->getStart();
10869 const SCEV *End = RHS;
10870 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
10871 End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
10872
10873 const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);
10874
10875 APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
10876 : getUnsignedRangeMax(Start);
10877
10878 APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
10879 : getUnsignedRangeMin(Stride);
10880
10881 unsigned BitWidth = getTypeSizeInBits(LHS->getType());
10882 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
10883 : APInt::getMinValue(BitWidth) + (MinStride - 1);
10884
10885 // Although End can be a MIN expression, we estimate MinEnd considering only
10886 // the case End = RHS. This is safe because in the other case (Start - End)
10887 // is zero, leading to a zero maximum backedge taken count.
10888 APInt MinEnd =
10889 IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
10890 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);
10891
10892 const SCEV *MaxBECount = isa<SCEVConstant>(BECount)
10893 ? BECount
10894 : computeBECount(getConstant(MaxStart - MinEnd),
10895 getConstant(MinStride), false);
10896
10897 if (isa<SCEVCouldNotCompute>(MaxBECount))
10898 MaxBECount = BECount;
10899
10900 return ExitLimit(BECount, MaxBECount, false, Predicates);
10901 }
10902
10903 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
10904 ScalarEvolution &SE) const {
10905 if (Range.isFullSet()) // Infinite loop.
10906 return SE.getCouldNotCompute();
10907
10908 // If the start is a non-zero constant, shift the range to simplify things.
10909 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
10910 if (!SC->getValue()->isZero()) {
10911 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
10912 Operands[0] = SE.getZero(SC->getType());
10913 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
10914 getNoWrapFlags(FlagNW));
10915 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
10916 return ShiftedAddRec->getNumIterationsInRange(
10917 Range.subtract(SC->getAPInt()), SE);
10918 // This is strange and shouldn't happen.
10919 return SE.getCouldNotCompute();
10920 }
10921
10922 // The only time we can solve this is when we have all constant indices.
10923 // Otherwise, we cannot determine the overflow conditions.
10924 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
10925 return SE.getCouldNotCompute();
10926
10927 // Okay, at this point we know that all elements of the chrec are constants
10928 // and that the start element is zero.
10929
10930 // First check to see if the range contains zero.
If not, the first
10931 // iteration exits.
10932 unsigned BitWidth = SE.getTypeSizeInBits(getType());
10933 if (!Range.contains(APInt(BitWidth, 0)))
10934 return SE.getZero(getType());
10935
10936 if (isAffine()) {
10937 // If this is an affine expression then we have this situation:
10938 // Solve {0,+,A} in Range === Ax in Range
10939
10940 // We know that zero is in the range. If A is positive then we know that
10941 // the upper value of the range must be the first possible exit value.
10942 // If A is negative then the lower of the range is the last possible loop
10943 // value. Also note that we already checked for a full range.
10944 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
10945 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();
10946
10947 // The exit value should be (End+A)/A. For example, for {0,+,3} and
// Range = [0, 10) we get End = 9 and ExitVal = (9+3)/3 = 4: iteration 4
// produces 12, the first value outside the range.
10948 APInt ExitVal = (End + A).udiv(A);
10949 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
10950
10951 // Evaluate at the exit value. If we really did fall out of the valid
10952 // range, then we computed our trip count, otherwise wrap-around or other
10953 // things must have happened.
10954 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
10955 if (Range.contains(Val->getValue()))
10956 return SE.getCouldNotCompute(); // Something strange happened
10957
10958 // Ensure that the previous value is in the range. This is a sanity check.
10959 assert(Range.contains(
10960 EvaluateConstantChrecAtConstant(this,
10961 ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
10962 "Linear scev computation is off in a bad way!");
10963 return SE.getConstant(ExitValue);
10964 }
10965
10966 if (isQuadratic()) {
10967 if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
10968 return SE.getConstant(S.getValue());
10969 }
10970
10971 return SE.getCouldNotCompute();
10972 }
10973
10974 const SCEVAddRecExpr *
10975 SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
10976 assert(getNumOperands() > 1 && "AddRec with zero step?");
10977 // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
10978 // but in this case we cannot guarantee that the value returned will be an
10979 // AddRec because SCEV does not have a fixed point where it stops
10980 // simplification: it is legal to return ({rec1} + {rec2}). For example, it
10981 // may happen if we reach the arithmetic depth limit while simplifying. So we
10982 // construct the returned value explicitly.
10983 SmallVector<const SCEV *, 3> Ops;
10984 // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
10985 // (this + Step) is {A+B,+,B+C,+...,+,N}.
10986 for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
10987 Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
10988 // We know that the last operand is not a constant zero (otherwise it would
10989 // have been popped out earlier). This guarantees us that if the result has
10990 // the same last operand, then it will also not be popped out, meaning that
10991 // the returned value will be an AddRec.
10992 const SCEV *Last = getOperand(getNumOperands() - 1);
10993 assert(!Last->isZero() && "Recurrence with zero step?");
10994 Ops.push_back(Last);
10995 return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
10996 SCEV::FlagAnyWrap));
10997 }
10998
10999 // Return true when S contains at least one undef value.
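// (For instance, a SCEVUnknown wrapping the IR value 'i32 undef' makes this
// predicate return true.)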
11000 static inline bool containsUndefs(const SCEV *S) { 11001 return SCEVExprContains(S, [](const SCEV *S) { 11002 if (const auto *SU = dyn_cast<SCEVUnknown>(S)) 11003 return isa<UndefValue>(SU->getValue()); 11004 return false; 11005 }); 11006 } 11007 11008 namespace { 11009 11010 // Collect all steps of SCEV expressions. 11011 struct SCEVCollectStrides { 11012 ScalarEvolution &SE; 11013 SmallVectorImpl<const SCEV *> &Strides; 11014 11015 SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S) 11016 : SE(SE), Strides(S) {} 11017 11018 bool follow(const SCEV *S) { 11019 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) 11020 Strides.push_back(AR->getStepRecurrence(SE)); 11021 return true; 11022 } 11023 11024 bool isDone() const { return false; } 11025 }; 11026 11027 // Collect all SCEVUnknown and SCEVMulExpr expressions. 11028 struct SCEVCollectTerms { 11029 SmallVectorImpl<const SCEV *> &Terms; 11030 11031 SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {} 11032 11033 bool follow(const SCEV *S) { 11034 if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) || 11035 isa<SCEVSignExtendExpr>(S)) { 11036 if (!containsUndefs(S)) 11037 Terms.push_back(S); 11038 11039 // Stop recursion: once we collected a term, do not walk its operands. 11040 return false; 11041 } 11042 11043 // Keep looking. 11044 return true; 11045 } 11046 11047 bool isDone() const { return false; } 11048 }; 11049 11050 // Check if a SCEV contains an AddRecExpr. 11051 struct SCEVHasAddRec { 11052 bool &ContainsAddRec; 11053 11054 SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) { 11055 ContainsAddRec = false; 11056 } 11057 11058 bool follow(const SCEV *S) { 11059 if (isa<SCEVAddRecExpr>(S)) { 11060 ContainsAddRec = true; 11061 11062 // Stop recursion: once we collected a term, do not walk its operands. 11063 return false; 11064 } 11065 11066 // Keep looking. 11067 return true; 11068 } 11069 11070 bool isDone() const { return false; } 11071 }; 11072 11073 // Find factors that are multiplied with an expression that (possibly as a 11074 // subexpression) contains an AddRecExpr. In the expression: 11075 // 11076 // 8 * (100 + %p * %q * (%a + {0, +, 1}_loop)) 11077 // 11078 // "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)" 11079 // that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size 11080 // parameters as they form a product with an induction variable. 11081 // 11082 // This collector expects all array size parameters to be in the same MulExpr. 11083 // It might be necessary to later add support for collecting parameters that are 11084 // spread over different nested MulExpr. 
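// (In the example above, the collector pushes the single product %p * %q into
// Terms: the constant factor 8 contributes no parameter, and the operand
// containing the AddRec is what sets HasAddRec.)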
11085 struct SCEVCollectAddRecMultiplies {
11086 SmallVectorImpl<const SCEV *> &Terms;
11087 ScalarEvolution &SE;
11088
11089 SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T, ScalarEvolution &SE)
11090 : Terms(T), SE(SE) {}
11091
11092 bool follow(const SCEV *S) {
11093 if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
11094 bool HasAddRec = false;
11095 SmallVector<const SCEV *, 0> Operands;
11096 for (auto Op : Mul->operands()) {
11097 const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
11098 if (Unknown && !isa<CallInst>(Unknown->getValue())) {
11099 Operands.push_back(Op);
11100 } else if (Unknown) {
11101 HasAddRec = true;
11102 } else {
11103 bool ContainsAddRec = false;
11104 SCEVHasAddRec AddRecFinder(ContainsAddRec);
11105 visitAll(Op, AddRecFinder);
11106 HasAddRec |= ContainsAddRec;
11107 }
11108 }
11109 if (Operands.empty())
11110 return true;
11111
11112 if (!HasAddRec)
11113 return false;
11114
11115 Terms.push_back(SE.getMulExpr(Operands));
11116 // Stop recursion: once we collected a term, do not walk its operands.
11117 return false;
11118 }
11119
11120 // Keep looking.
11121 return true;
11122 }
11123
11124 bool isDone() const { return false; }
11125 };
11126
11127 } // end anonymous namespace
11128
11129 /// Find parametric terms in this SCEVAddRecExpr. We look for parameters in
11130 /// two places:
11131 /// 1) The strides of AddRec expressions.
11132 /// 2) Unknowns that are multiplied with AddRec expressions.
11133 void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
11134 SmallVectorImpl<const SCEV *> &Terms) {
11135 SmallVector<const SCEV *, 4> Strides;
11136 SCEVCollectStrides StrideCollector(*this, Strides);
11137 visitAll(Expr, StrideCollector);
11138
11139 LLVM_DEBUG({
11140 dbgs() << "Strides:\n";
11141 for (const SCEV *S : Strides)
11142 dbgs() << *S << "\n";
11143 });
11144
11145 for (const SCEV *S : Strides) {
11146 SCEVCollectTerms TermCollector(Terms);
11147 visitAll(S, TermCollector);
11148 }
11149
11150 LLVM_DEBUG({
11151 dbgs() << "Terms:\n";
11152 for (const SCEV *T : Terms)
11153 dbgs() << *T << "\n";
11154 });
11155
11156 SCEVCollectAddRecMultiplies MulCollector(Terms, *this);
11157 visitAll(Expr, MulCollector);
11158 }
11159
11160 static bool findArrayDimensionsRec(ScalarEvolution &SE,
11161 SmallVectorImpl<const SCEV *> &Terms,
11162 SmallVectorImpl<const SCEV *> &Sizes) {
11163 int Last = Terms.size() - 1;
11164 const SCEV *Step = Terms[Last];
11165
11166 // End of recursion.
11167 if (Last == 0) {
11168 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
11169 SmallVector<const SCEV *, 2> Qs;
11170 for (const SCEV *Op : M->operands())
11171 if (!isa<SCEVConstant>(Op))
11172 Qs.push_back(Op);
11173
11174 Step = SE.getMulExpr(Qs);
11175 }
11176
11177 Sizes.push_back(Step);
11178 return true;
11179 }
11180
11181 for (const SCEV *&Term : Terms) {
11182 // Normalize the terms before the next call to findArrayDimensionsRec.
11183 const SCEV *Q, *R;
11184 SCEVDivision::divide(SE, Term, Step, &Q, &R);
11185
11186 // Bail out when the GCD does not evenly divide one of the terms.
11187 if (!R->isZero())
11188 return false;
11189
11190 Term = Q;
11191 }
11192
11193 // Remove all SCEVConstants.
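// (A worked example with hypothetical terms: from Terms = {%m * %o, %o} we
// take Step = %o; division leaves {%m, 1}, the constant 1 is erased below,
// the recursive call pushes %m, and finally %o is appended, giving
// Sizes = {%m, %o}.)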
11194 Terms.erase( 11195 remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }), 11196 Terms.end()); 11197 11198 if (Terms.size() > 0) 11199 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 11200 return false; 11201 11202 Sizes.push_back(Step); 11203 return true; 11204 } 11205 11206 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 11207 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 11208 for (const SCEV *T : Terms) 11209 if (SCEVExprContains(T, [](const SCEV *S) { return isa<SCEVUnknown>(S); })) 11210 return true; 11211 11212 return false; 11213 } 11214 11215 // Return the number of product terms in S. 11216 static inline int numberOfTerms(const SCEV *S) { 11217 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 11218 return Expr->getNumOperands(); 11219 return 1; 11220 } 11221 11222 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 11223 if (isa<SCEVConstant>(T)) 11224 return nullptr; 11225 11226 if (isa<SCEVUnknown>(T)) 11227 return T; 11228 11229 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 11230 SmallVector<const SCEV *, 2> Factors; 11231 for (const SCEV *Op : M->operands()) 11232 if (!isa<SCEVConstant>(Op)) 11233 Factors.push_back(Op); 11234 11235 return SE.getMulExpr(Factors); 11236 } 11237 11238 return T; 11239 } 11240 11241 /// Return the size of an element read or written by Inst. 11242 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 11243 Type *Ty; 11244 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 11245 Ty = Store->getValueOperand()->getType(); 11246 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 11247 Ty = Load->getType(); 11248 else 11249 return nullptr; 11250 11251 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 11252 return getSizeOfExpr(ETy, Ty); 11253 } 11254 11255 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, 11256 SmallVectorImpl<const SCEV *> &Sizes, 11257 const SCEV *ElementSize) { 11258 if (Terms.size() < 1 || !ElementSize) 11259 return; 11260 11261 // Early return when Terms do not contain parameters: we do not delinearize 11262 // non parametric SCEVs. 11263 if (!containsParameters(Terms)) 11264 return; 11265 11266 LLVM_DEBUG({ 11267 dbgs() << "Terms:\n"; 11268 for (const SCEV *T : Terms) 11269 dbgs() << *T << "\n"; 11270 }); 11271 11272 // Remove duplicates. 11273 array_pod_sort(Terms.begin(), Terms.end()); 11274 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); 11275 11276 // Put larger terms first. 11277 llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) { 11278 return numberOfTerms(LHS) > numberOfTerms(RHS); 11279 }); 11280 11281 // Try to divide all terms by the element size. If term is not divisible by 11282 // element size, proceed with the original term. 11283 for (const SCEV *&Term : Terms) { 11284 const SCEV *Q, *R; 11285 SCEVDivision::divide(*this, Term, ElementSize, &Q, &R); 11286 if (!Q->isZero()) 11287 Term = Q; 11288 } 11289 11290 SmallVector<const SCEV *, 4> NewTerms; 11291 11292 // Remove constant factors. 
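// (E.g. removeConstantFactors turns 8 * %m into %m, and a term that is just
// a constant is dropped entirely.)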
11293 for (const SCEV *T : Terms) 11294 if (const SCEV *NewT = removeConstantFactors(*this, T)) 11295 NewTerms.push_back(NewT); 11296 11297 LLVM_DEBUG({ 11298 dbgs() << "Terms after sorting:\n"; 11299 for (const SCEV *T : NewTerms) 11300 dbgs() << *T << "\n"; 11301 }); 11302 11303 if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) { 11304 Sizes.clear(); 11305 return; 11306 } 11307 11308 // The last element to be pushed into Sizes is the size of an element. 11309 Sizes.push_back(ElementSize); 11310 11311 LLVM_DEBUG({ 11312 dbgs() << "Sizes:\n"; 11313 for (const SCEV *S : Sizes) 11314 dbgs() << *S << "\n"; 11315 }); 11316 } 11317 11318 void ScalarEvolution::computeAccessFunctions( 11319 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, 11320 SmallVectorImpl<const SCEV *> &Sizes) { 11321 // Early exit in case this SCEV is not an affine multivariate function. 11322 if (Sizes.empty()) 11323 return; 11324 11325 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr)) 11326 if (!AR->isAffine()) 11327 return; 11328 11329 const SCEV *Res = Expr; 11330 int Last = Sizes.size() - 1; 11331 for (int i = Last; i >= 0; i--) { 11332 const SCEV *Q, *R; 11333 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); 11334 11335 LLVM_DEBUG({ 11336 dbgs() << "Res: " << *Res << "\n"; 11337 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; 11338 dbgs() << "Res divided by Sizes[i]:\n"; 11339 dbgs() << "Quotient: " << *Q << "\n"; 11340 dbgs() << "Remainder: " << *R << "\n"; 11341 }); 11342 11343 Res = Q; 11344 11345 // Do not record the last subscript corresponding to the size of elements in 11346 // the array. 11347 if (i == Last) { 11348 11349 // Bail out if the remainder is too complex. 11350 if (isa<SCEVAddRecExpr>(R)) { 11351 Subscripts.clear(); 11352 Sizes.clear(); 11353 return; 11354 } 11355 11356 continue; 11357 } 11358 11359 // Record the access function for the current subscript. 11360 Subscripts.push_back(R); 11361 } 11362 11363 // Also push in last position the remainder of the last division: it will be 11364 // the access function of the innermost dimension. 11365 Subscripts.push_back(Res); 11366 11367 std::reverse(Subscripts.begin(), Subscripts.end()); 11368 11369 LLVM_DEBUG({ 11370 dbgs() << "Subscripts:\n"; 11371 for (const SCEV *S : Subscripts) 11372 dbgs() << *S << "\n"; 11373 }); 11374 } 11375 11376 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and 11377 /// sizes of an array access. Returns the remainder of the delinearization that 11378 /// is the offset start of the array. The SCEV->delinearize algorithm computes 11379 /// the multiples of SCEV coefficients: that is a pattern matching of sub 11380 /// expressions in the stride and base of a SCEV corresponding to the 11381 /// computation of a GCD (greatest common divisor) of base and stride. When 11382 /// SCEV->delinearize fails, it returns the SCEV unchanged. 
11383 /// 11384 /// For example: when analyzing the memory access A[i][j][k] in this loop nest 11385 /// 11386 /// void foo(long n, long m, long o, double A[n][m][o]) { 11387 /// 11388 /// for (long i = 0; i < n; i++) 11389 /// for (long j = 0; j < m; j++) 11390 /// for (long k = 0; k < o; k++) 11391 /// A[i][j][k] = 1.0; 11392 /// } 11393 /// 11394 /// the delinearization input is the following AddRec SCEV: 11395 /// 11396 /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k> 11397 /// 11398 /// From this SCEV, we are able to say that the base offset of the access is %A 11399 /// because it appears as an offset that does not divide any of the strides in 11400 /// the loops: 11401 /// 11402 /// CHECK: Base offset: %A 11403 /// 11404 /// and then SCEV->delinearize determines the size of some of the dimensions of 11405 /// the array as these are the multiples by which the strides are happening: 11406 /// 11407 /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes. 11408 /// 11409 /// Note that the outermost dimension remains of UnknownSize because there are 11410 /// no strides that would help identifying the size of the last dimension: when 11411 /// the array has been statically allocated, one could compute the size of that 11412 /// dimension by dividing the overall size of the array by the size of the known 11413 /// dimensions: %m * %o * 8. 11414 /// 11415 /// Finally delinearize provides the access functions for the array reference 11416 /// that does correspond to A[i][j][k] of the above C testcase: 11417 /// 11418 /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>] 11419 /// 11420 /// The testcases are checking the output of a function pass: 11421 /// DelinearizationPass that walks through all loads and stores of a function 11422 /// asking for the SCEV of the memory access with respect to all enclosing 11423 /// loops, calling SCEV->delinearize on that and printing the results. 11424 void ScalarEvolution::delinearize(const SCEV *Expr, 11425 SmallVectorImpl<const SCEV *> &Subscripts, 11426 SmallVectorImpl<const SCEV *> &Sizes, 11427 const SCEV *ElementSize) { 11428 // First step: collect parametric terms. 11429 SmallVector<const SCEV *, 4> Terms; 11430 collectParametricTerms(Expr, Terms); 11431 11432 if (Terms.empty()) 11433 return; 11434 11435 // Second step: find subscript sizes. 11436 findArrayDimensions(Terms, Sizes, ElementSize); 11437 11438 if (Sizes.empty()) 11439 return; 11440 11441 // Third step: compute the access functions for each subscript. 
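// (For the A[i][j][k] example in the function comment above, this step
// computes Subscripts = {{0,+,1}<%for.i>, {0,+,1}<%for.j>, {0,+,1}<%for.k>}.)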
11442 computeAccessFunctions(Expr, Subscripts, Sizes); 11443 11444 if (Subscripts.empty()) 11445 return; 11446 11447 LLVM_DEBUG({ 11448 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 11449 dbgs() << "ArrayDecl[UnknownSize]"; 11450 for (const SCEV *S : Sizes) 11451 dbgs() << "[" << *S << "]"; 11452 11453 dbgs() << "\nArrayRef"; 11454 for (const SCEV *S : Subscripts) 11455 dbgs() << "[" << *S << "]"; 11456 dbgs() << "\n"; 11457 }); 11458 } 11459 11460 bool ScalarEvolution::getIndexExpressionsFromGEP( 11461 const GetElementPtrInst *GEP, SmallVectorImpl<const SCEV *> &Subscripts, 11462 SmallVectorImpl<int> &Sizes) { 11463 assert(Subscripts.empty() && Sizes.empty() && 11464 "Expected output lists to be empty on entry to this function."); 11465 assert(GEP && "getIndexExpressionsFromGEP called with a null GEP"); 11466 Type *Ty = GEP->getPointerOperandType(); 11467 bool DroppedFirstDim = false; 11468 for (unsigned i = 1; i < GEP->getNumOperands(); i++) { 11469 const SCEV *Expr = getSCEV(GEP->getOperand(i)); 11470 if (i == 1) { 11471 if (auto *PtrTy = dyn_cast<PointerType>(Ty)) { 11472 Ty = PtrTy->getElementType(); 11473 } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) { 11474 Ty = ArrayTy->getElementType(); 11475 } else { 11476 Subscripts.clear(); 11477 Sizes.clear(); 11478 return false; 11479 } 11480 if (auto *Const = dyn_cast<SCEVConstant>(Expr)) 11481 if (Const->getValue()->isZero()) { 11482 DroppedFirstDim = true; 11483 continue; 11484 } 11485 Subscripts.push_back(Expr); 11486 continue; 11487 } 11488 11489 auto *ArrayTy = dyn_cast<ArrayType>(Ty); 11490 if (!ArrayTy) { 11491 Subscripts.clear(); 11492 Sizes.clear(); 11493 return false; 11494 } 11495 11496 Subscripts.push_back(Expr); 11497 if (!(DroppedFirstDim && i == 2)) 11498 Sizes.push_back(ArrayTy->getNumElements()); 11499 11500 Ty = ArrayTy->getElementType(); 11501 } 11502 return !Subscripts.empty(); 11503 } 11504 11505 //===----------------------------------------------------------------------===// 11506 // SCEVCallbackVH Class Implementation 11507 //===----------------------------------------------------------------------===// 11508 11509 void ScalarEvolution::SCEVCallbackVH::deleted() { 11510 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11511 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 11512 SE->ConstantEvolutionLoopExitValue.erase(PN); 11513 SE->eraseValueFromMap(getValPtr()); 11514 // this now dangles! 11515 } 11516 11517 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 11518 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11519 11520 // Forget all the expressions associated with users of the old value, 11521 // so that future queries will recompute the expressions using the new 11522 // value. 11523 Value *Old = getValPtr(); 11524 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); 11525 SmallPtrSet<User *, 8> Visited; 11526 while (!Worklist.empty()) { 11527 User *U = Worklist.pop_back_val(); 11528 // Deleting the Old value will cause this to dangle. Postpone 11529 // that until everything else is done. 11530 if (U == Old) 11531 continue; 11532 if (!Visited.insert(U).second) 11533 continue; 11534 if (PHINode *PN = dyn_cast<PHINode>(U)) 11535 SE->ConstantEvolutionLoopExitValue.erase(PN); 11536 SE->eraseValueFromMap(U); 11537 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); 11538 } 11539 // Delete the Old value. 
11540 if (PHINode *PN = dyn_cast<PHINode>(Old)) 11541 SE->ConstantEvolutionLoopExitValue.erase(PN); 11542 SE->eraseValueFromMap(Old); 11543 // this now dangles! 11544 } 11545 11546 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 11547 : CallbackVH(V), SE(se) {} 11548 11549 //===----------------------------------------------------------------------===// 11550 // ScalarEvolution Class Implementation 11551 //===----------------------------------------------------------------------===// 11552 11553 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 11554 AssumptionCache &AC, DominatorTree &DT, 11555 LoopInfo &LI) 11556 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 11557 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 11558 LoopDispositions(64), BlockDispositions(64) { 11559 // To use guards for proving predicates, we need to scan every instruction in 11560 // relevant basic blocks, and not just terminators. Doing this is a waste of 11561 // time if the IR does not actually contain any calls to 11562 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 11563 // 11564 // This pessimizes the case where a pass that preserves ScalarEvolution wants 11565 // to _add_ guards to the module when there weren't any before, and wants 11566 // ScalarEvolution to optimize based on those guards. For now we prefer to be 11567 // efficient in lieu of being smart in that rather obscure case. 11568 11569 auto *GuardDecl = F.getParent()->getFunction( 11570 Intrinsic::getName(Intrinsic::experimental_guard)); 11571 HasGuards = GuardDecl && !GuardDecl->use_empty(); 11572 } 11573 11574 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 11575 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 11576 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 11577 ValueExprMap(std::move(Arg.ValueExprMap)), 11578 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 11579 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 11580 PendingMerges(std::move(Arg.PendingMerges)), 11581 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 11582 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 11583 PredicatedBackedgeTakenCounts( 11584 std::move(Arg.PredicatedBackedgeTakenCounts)), 11585 ConstantEvolutionLoopExitValue( 11586 std::move(Arg.ConstantEvolutionLoopExitValue)), 11587 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 11588 LoopDispositions(std::move(Arg.LoopDispositions)), 11589 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 11590 BlockDispositions(std::move(Arg.BlockDispositions)), 11591 UnsignedRanges(std::move(Arg.UnsignedRanges)), 11592 SignedRanges(std::move(Arg.SignedRanges)), 11593 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 11594 UniquePreds(std::move(Arg.UniquePreds)), 11595 SCEVAllocator(std::move(Arg.SCEVAllocator)), 11596 LoopUsers(std::move(Arg.LoopUsers)), 11597 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 11598 FirstUnknown(Arg.FirstUnknown) { 11599 Arg.FirstUnknown = nullptr; 11600 } 11601 11602 ScalarEvolution::~ScalarEvolution() { 11603 // Iterate through all the SCEVUnknown instances and call their 11604 // destructors, so that they release their references to their values. 
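// (SCEV nodes are bump-allocated from SCEVAllocator, so their destructors
// are never run automatically; SCEVUnknown is the node that needs an
// explicit destructor call so its CallbackVH detaches from the tracked
// Value.)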
11605 for (SCEVUnknown *U = FirstUnknown; U;) { 11606 SCEVUnknown *Tmp = U; 11607 U = U->Next; 11608 Tmp->~SCEVUnknown(); 11609 } 11610 FirstUnknown = nullptr; 11611 11612 ExprValueMap.clear(); 11613 ValueExprMap.clear(); 11614 HasRecMap.clear(); 11615 11616 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 11617 // that a loop had multiple computable exits. 11618 for (auto &BTCI : BackedgeTakenCounts) 11619 BTCI.second.clear(); 11620 for (auto &BTCI : PredicatedBackedgeTakenCounts) 11621 BTCI.second.clear(); 11622 11623 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 11624 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 11625 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 11626 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 11627 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 11628 } 11629 11630 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 11631 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 11632 } 11633 11634 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 11635 const Loop *L) { 11636 // Print all inner loops first 11637 for (Loop *I : *L) 11638 PrintLoopInfo(OS, SE, I); 11639 11640 OS << "Loop "; 11641 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11642 OS << ": "; 11643 11644 SmallVector<BasicBlock *, 8> ExitingBlocks; 11645 L->getExitingBlocks(ExitingBlocks); 11646 if (ExitingBlocks.size() != 1) 11647 OS << "<multiple exits> "; 11648 11649 if (SE->hasLoopInvariantBackedgeTakenCount(L)) 11650 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n"; 11651 else 11652 OS << "Unpredictable backedge-taken count.\n"; 11653 11654 if (ExitingBlocks.size() > 1) 11655 for (BasicBlock *ExitingBlock : ExitingBlocks) { 11656 OS << " exit count for " << ExitingBlock->getName() << ": " 11657 << *SE->getExitCount(L, ExitingBlock) << "\n"; 11658 } 11659 11660 OS << "Loop "; 11661 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11662 OS << ": "; 11663 11664 if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) { 11665 OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L); 11666 if (SE->isBackedgeTakenCountMaxOrZero(L)) 11667 OS << ", actual taken count either this or zero."; 11668 } else { 11669 OS << "Unpredictable max backedge-taken count. "; 11670 } 11671 11672 OS << "\n" 11673 "Loop "; 11674 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11675 OS << ": "; 11676 11677 SCEVUnionPredicate Pred; 11678 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 11679 if (!isa<SCEVCouldNotCompute>(PBT)) { 11680 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 11681 OS << " Predicates:\n"; 11682 Pred.print(OS, 4); 11683 } else { 11684 OS << "Unpredictable predicated backedge-taken count. 
"; 11685 } 11686 OS << "\n"; 11687 11688 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 11689 OS << "Loop "; 11690 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11691 OS << ": "; 11692 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 11693 } 11694 } 11695 11696 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 11697 switch (LD) { 11698 case ScalarEvolution::LoopVariant: 11699 return "Variant"; 11700 case ScalarEvolution::LoopInvariant: 11701 return "Invariant"; 11702 case ScalarEvolution::LoopComputable: 11703 return "Computable"; 11704 } 11705 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 11706 } 11707 11708 void ScalarEvolution::print(raw_ostream &OS) const { 11709 // ScalarEvolution's implementation of the print method is to print 11710 // out SCEV values of all instructions that are interesting. Doing 11711 // this potentially causes it to create new SCEV objects though, 11712 // which technically conflicts with the const qualifier. This isn't 11713 // observable from outside the class though, so casting away the 11714 // const isn't dangerous. 11715 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 11716 11717 if (ClassifyExpressions) { 11718 OS << "Classifying expressions for: "; 11719 F.printAsOperand(OS, /*PrintType=*/false); 11720 OS << "\n"; 11721 for (Instruction &I : instructions(F)) 11722 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 11723 OS << I << '\n'; 11724 OS << " --> "; 11725 const SCEV *SV = SE.getSCEV(&I); 11726 SV->print(OS); 11727 if (!isa<SCEVCouldNotCompute>(SV)) { 11728 OS << " U: "; 11729 SE.getUnsignedRange(SV).print(OS); 11730 OS << " S: "; 11731 SE.getSignedRange(SV).print(OS); 11732 } 11733 11734 const Loop *L = LI.getLoopFor(I.getParent()); 11735 11736 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 11737 if (AtUse != SV) { 11738 OS << " --> "; 11739 AtUse->print(OS); 11740 if (!isa<SCEVCouldNotCompute>(AtUse)) { 11741 OS << " U: "; 11742 SE.getUnsignedRange(AtUse).print(OS); 11743 OS << " S: "; 11744 SE.getSignedRange(AtUse).print(OS); 11745 } 11746 } 11747 11748 if (L) { 11749 OS << "\t\t" "Exits: "; 11750 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 11751 if (!SE.isLoopInvariant(ExitValue, L)) { 11752 OS << "<<Unknown>>"; 11753 } else { 11754 OS << *ExitValue; 11755 } 11756 11757 bool First = true; 11758 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 11759 if (First) { 11760 OS << "\t\t" "LoopDispositions: { "; 11761 First = false; 11762 } else { 11763 OS << ", "; 11764 } 11765 11766 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11767 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 11768 } 11769 11770 for (auto *InnerL : depth_first(L)) { 11771 if (InnerL == L) 11772 continue; 11773 if (First) { 11774 OS << "\t\t" "LoopDispositions: { "; 11775 First = false; 11776 } else { 11777 OS << ", "; 11778 } 11779 11780 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11781 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 11782 } 11783 11784 OS << " }"; 11785 } 11786 11787 OS << "\n"; 11788 } 11789 } 11790 11791 OS << "Determining loop execution counts for: "; 11792 F.printAsOperand(OS, /*PrintType=*/false); 11793 OS << "\n"; 11794 for (Loop *I : LI) 11795 PrintLoopInfo(OS, &SE, I); 11796 } 11797 11798 ScalarEvolution::LoopDisposition 11799 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 11800 auto &Values = LoopDispositions[S]; 11801 for (auto 
&V : Values) { 11802 if (V.getPointer() == L) 11803 return V.getInt(); 11804 } 11805 Values.emplace_back(L, LoopVariant); 11806 LoopDisposition D = computeLoopDisposition(S, L); 11807 auto &Values2 = LoopDispositions[S]; 11808 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 11809 if (V.getPointer() == L) { 11810 V.setInt(D); 11811 break; 11812 } 11813 } 11814 return D; 11815 } 11816 11817 ScalarEvolution::LoopDisposition 11818 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 11819 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 11820 case scConstant: 11821 return LoopInvariant; 11822 case scTruncate: 11823 case scZeroExtend: 11824 case scSignExtend: 11825 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 11826 case scAddRecExpr: { 11827 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 11828 11829 // If L is the addrec's loop, it's computable. 11830 if (AR->getLoop() == L) 11831 return LoopComputable; 11832 11833 // Add recurrences are never invariant in the function-body (null loop). 11834 if (!L) 11835 return LoopVariant; 11836 11837 // Everything that is not defined at loop entry is variant. 11838 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 11839 return LoopVariant; 11840 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 11841 " dominate the contained loop's header?"); 11842 11843 // This recurrence is invariant w.r.t. L if AR's loop contains L. 11844 if (AR->getLoop()->contains(L)) 11845 return LoopInvariant; 11846 11847 // This recurrence is variant w.r.t. L if any of its operands 11848 // are variant. 11849 for (auto *Op : AR->operands()) 11850 if (!isLoopInvariant(Op, L)) 11851 return LoopVariant; 11852 11853 // Otherwise it's loop-invariant. 11854 return LoopInvariant; 11855 } 11856 case scAddExpr: 11857 case scMulExpr: 11858 case scUMaxExpr: 11859 case scSMaxExpr: 11860 case scUMinExpr: 11861 case scSMinExpr: { 11862 bool HasVarying = false; 11863 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 11864 LoopDisposition D = getLoopDisposition(Op, L); 11865 if (D == LoopVariant) 11866 return LoopVariant; 11867 if (D == LoopComputable) 11868 HasVarying = true; 11869 } 11870 return HasVarying ? LoopComputable : LoopInvariant; 11871 } 11872 case scUDivExpr: { 11873 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 11874 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 11875 if (LD == LoopVariant) 11876 return LoopVariant; 11877 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 11878 if (RD == LoopVariant) 11879 return LoopVariant; 11880 return (LD == LoopInvariant && RD == LoopInvariant) ? 11881 LoopInvariant : LoopComputable; 11882 } 11883 case scUnknown: 11884 // All non-instruction values are loop invariant. All instructions are loop 11885 // invariant if they are not contained in the specified loop. 11886 // Instructions are never considered invariant in the function body 11887 // (null loop) because they are defined within the "loop". 11888 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 11889 return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; 11890 return LoopInvariant; 11891 case scCouldNotCompute: 11892 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 11893 } 11894 llvm_unreachable("Unknown SCEV kind!"); 11895 } 11896 11897 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 11898 return getLoopDisposition(S, L) == LoopInvariant; 11899 } 11900 11901 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 11902 return getLoopDisposition(S, L) == LoopComputable; 11903 } 11904 11905 ScalarEvolution::BlockDisposition 11906 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 11907 auto &Values = BlockDispositions[S]; 11908 for (auto &V : Values) { 11909 if (V.getPointer() == BB) 11910 return V.getInt(); 11911 } 11912 Values.emplace_back(BB, DoesNotDominateBlock); 11913 BlockDisposition D = computeBlockDisposition(S, BB); 11914 auto &Values2 = BlockDispositions[S]; 11915 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 11916 if (V.getPointer() == BB) { 11917 V.setInt(D); 11918 break; 11919 } 11920 } 11921 return D; 11922 } 11923 11924 ScalarEvolution::BlockDisposition 11925 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 11926 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 11927 case scConstant: 11928 return ProperlyDominatesBlock; 11929 case scTruncate: 11930 case scZeroExtend: 11931 case scSignExtend: 11932 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 11933 case scAddRecExpr: { 11934 // This uses a "dominates" query instead of "properly dominates" query 11935 // to test for proper dominance too, because the instruction which 11936 // produces the addrec's value is a PHI, and a PHI effectively properly 11937 // dominates its entire containing block. 11938 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 11939 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 11940 return DoesNotDominateBlock; 11941 11942 // Fall through into SCEVNAryExpr handling. 11943 LLVM_FALLTHROUGH; 11944 } 11945 case scAddExpr: 11946 case scMulExpr: 11947 case scUMaxExpr: 11948 case scSMaxExpr: 11949 case scUMinExpr: 11950 case scSMinExpr: { 11951 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 11952 bool Proper = true; 11953 for (const SCEV *NAryOp : NAry->operands()) { 11954 BlockDisposition D = getBlockDisposition(NAryOp, BB); 11955 if (D == DoesNotDominateBlock) 11956 return DoesNotDominateBlock; 11957 if (D == DominatesBlock) 11958 Proper = false; 11959 } 11960 return Proper ? ProperlyDominatesBlock : DominatesBlock; 11961 } 11962 case scUDivExpr: { 11963 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 11964 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 11965 BlockDisposition LD = getBlockDisposition(LHS, BB); 11966 if (LD == DoesNotDominateBlock) 11967 return DoesNotDominateBlock; 11968 BlockDisposition RD = getBlockDisposition(RHS, BB); 11969 if (RD == DoesNotDominateBlock) 11970 return DoesNotDominateBlock; 11971 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
  case scUnknown:
    if (Instruction *I =
          dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const {
  auto IsS = [&](const SCEV *X) { return S == X; };
  auto ContainsS = [&](const SCEV *X) {
    return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS);
  };
  return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
}

void
ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  ExprValueMap.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (Entry.first == S)
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }

  auto RemoveSCEVFromBackedgeMap =
      [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        for (auto I = Map.begin(), E = Map.end(); I != E;) {
          BackedgeTakenInfo &BEInfo = I->second;
          if (BEInfo.hasOperand(S, this)) {
            BEInfo.clear();
            Map.erase(I++);
          } else
            ++I;
        }
      };

  RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
}

void
ScalarEvolution::getUsedLoops(const SCEV *S,
                              SmallPtrSetImpl<const Loop *> &LoopsUsed) {
  struct FindUsedLoops {
    FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
        : LoopsUsed(LoopsUsed) {}
    SmallPtrSetImpl<const Loop *> &LoopsUsed;
    bool follow(const SCEV *S) {
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
        LoopsUsed.insert(AR->getLoop());
      return true;
    }

    bool isDone() const { return false; }
  };

  FindUsedLoops F(LoopsUsed);
  SCEVTraversal<FindUsedLoops>(F).visitAll(S);
}

void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(S, LoopsUsed);
  for (auto *L : LoopsUsed)
    LoopUsers[L].push_back(S);
}

void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
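  // (Illustrative note:) only the leaves need explicit mapping -- constants
  // and unknowns are re-interned in SE2 by the visitors below, and the
  // rewriting visitor rebuilds every interior node in SE2, so the comparison
  // against SE2's freshly computed backedge counts stays within a single
  // fold-table universe where pointer equality is meaningful.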
ScalarEvolution "universe" to another. 12080 struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> { 12081 SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {} 12082 12083 const SCEV *visitConstant(const SCEVConstant *Constant) { 12084 return SE.getConstant(Constant->getAPInt()); 12085 } 12086 12087 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 12088 return SE.getUnknown(Expr->getValue()); 12089 } 12090 12091 const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { 12092 return SE.getCouldNotCompute(); 12093 } 12094 }; 12095 12096 SCEVMapper SCM(SE2); 12097 12098 while (!LoopStack.empty()) { 12099 auto *L = LoopStack.pop_back_val(); 12100 LoopStack.insert(LoopStack.end(), L->begin(), L->end()); 12101 12102 auto *CurBECount = SCM.visit( 12103 const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L)); 12104 auto *NewBECount = SE2.getBackedgeTakenCount(L); 12105 12106 if (CurBECount == SE2.getCouldNotCompute() || 12107 NewBECount == SE2.getCouldNotCompute()) { 12108 // NB! This situation is legal, but is very suspicious -- whatever pass 12109 // change the loop to make a trip count go from could not compute to 12110 // computable or vice-versa *should have* invalidated SCEV. However, we 12111 // choose not to assert here (for now) since we don't want false 12112 // positives. 12113 continue; 12114 } 12115 12116 if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) { 12117 // SCEV treats "undef" as an unknown but consistent value (i.e. it does 12118 // not propagate undef aggressively). This means we can (and do) fail 12119 // verification in cases where a transform makes the trip count of a loop 12120 // go from "undef" to "undef+1" (say). The transform is fine, since in 12121 // both cases the loop iterates "undef" times, but SCEV thinks we 12122 // increased the trip count of the loop by 1 incorrectly. 12123 continue; 12124 } 12125 12126 if (SE.getTypeSizeInBits(CurBECount->getType()) > 12127 SE.getTypeSizeInBits(NewBECount->getType())) 12128 NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType()); 12129 else if (SE.getTypeSizeInBits(CurBECount->getType()) < 12130 SE.getTypeSizeInBits(NewBECount->getType())) 12131 CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType()); 12132 12133 const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount); 12134 12135 // Unless VerifySCEVStrict is set, we only compare constant deltas. 12136 if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) { 12137 dbgs() << "Trip Count for " << *L << " Changed!\n"; 12138 dbgs() << "Old: " << *CurBECount << "\n"; 12139 dbgs() << "New: " << *NewBECount << "\n"; 12140 dbgs() << "Delta: " << *Delta << "\n"; 12141 std::abort(); 12142 } 12143 } 12144 } 12145 12146 bool ScalarEvolution::invalidate( 12147 Function &F, const PreservedAnalyses &PA, 12148 FunctionAnalysisManager::Invalidator &Inv) { 12149 // Invalidate the ScalarEvolution object whenever it isn't preserved or one 12150 // of its dependencies is invalidated. 
  auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AssumptionAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA);
}

AnalysisKey ScalarEvolutionAnalysis::Key;

ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
                         AM.getResult<AssumptionAnalysis>(F),
                         AM.getResult<DominatorTreeAnalysis>(F),
                         AM.getResult<LoopAnalysis>(F));
}

PreservedAnalyses
ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
  AM.getResult<ScalarEvolutionAnalysis>(F).verify();
  return PreservedAnalyses::all();
}

PreservedAnalyses
ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}

INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)

char ScalarEvolutionWrapperPass::ID = 0;

ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
  initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
}

bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
  SE.reset(new ScalarEvolution(
      F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
      getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
      getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
  return false;
}

void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }

void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
  SE->print(OS);
}

void ScalarEvolutionWrapperPass::verifyAnalysis() const {
  if (!VerifySCEV)
    return;

  SE->verify();
}

void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
                                                        const SCEV *RHS) {
  FoldingSetNodeID ID;
  assert(LHS->getType() == RHS->getType() &&
         "Type mismatch between LHS and RHS");
  // Unique this node based on the arguments
  ID.AddInteger(SCEVPredicate::P_Equal);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEVEqualPredicate *Eq = new (SCEVAllocator)
      SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS);
  UniquePreds.InsertNode(Eq, IP);
  return Eq;
}

const SCEVPredicate *ScalarEvolution::getWrapPredicate(
    const SCEVAddRecExpr *AR,
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
  FoldingSetNodeID ID;
  // Unique this node based on the arguments
  ID.AddInteger(SCEVPredicate::P_Wrap);
  ID.AddPointer(AR);
  ID.AddInteger(AddedFlags);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  auto *OF = new (SCEVAllocator)
      SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
  UniquePreds.InsertNode(OF, IP);
  return OF;
}

namespace {

class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:

  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, the rewrite is free to add further predicates
  /// to \p NewPreds such that the result will be an AddRecExpr.
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                             SCEVUnionPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      auto ExprPreds = Pred->getPredicatesForExpr(Expr);
      for (auto *Pred : ExprPreds)
        if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
          if (IPred->getLHS() == Expr)
            return IPred->getRHS();
    }
    return convertToAddRecWithPreds(Expr);
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
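      // For instance (illustrative types and names): "sext i32
      // {%start,+,%step}<%L> to i64" cannot be folded without nsw, but under
      // an added <nssw> predicate it becomes the i64 addrec
      // {sext(%start),+,sext(%step)}<%L>, to be checked at runtime by
      // whoever collected NewPreds.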
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }

private:
  explicit SCEVPredicateRewriter(
      const Loop *L, ScalarEvolution &SE,
      SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
      SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
      return Pred && Pred->implies(P);
    }
    NewPreds->insert(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
        PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (auto *P : PredicatedRewrite->second) {
      // Wrap predicates from outer loops are not supported.
      if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
        auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
        if (L != AR->getLoop())
          return Expr;
      }
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};

} // end anonymous namespace

const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}

const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
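  // A minimal usage sketch (hypothetical caller, assumed names): the AddRec
  // is only sound if the caller guards the loop on everything added to Preds:
  //   SmallPtrSet<const SCEVPredicate *, 4> Preds;
  //   if (auto *AR = SE.convertSCEVToAddRecWithPredicates(S, L, Preds))
  //     /* emit runtime checks for Preds, then use AR */;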
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}

/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}

bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}

SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is non-negative, the SCEV NUW flag will also imply the
    // WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}

/// Union predicates don't get cached so create a dummy set ID for it.
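/// (Illustrative:) a union containing {a == b, <nssw> on {x,+,1}<%L>} implies
/// each of its members, and implies another union exactly when it implies all
/// of that union's members; see implies() below.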
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}

ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}

bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }

void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}

void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an"
                " associated expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}

PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {}

const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}

void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
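  // (Reasoning behind the wrap handling:) cached entries are stamped with the
  // generation that produced them, so after an unsigned wrap a stale stamp
  // could compare equal to the new generation number; eagerly re-rewriting
  // every cached entry here keeps the stamps trustworthy.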
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (auto I : Init.FlagsMap)
    FlagsMap.insert(I);
}

void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}

// Match the mathematical pattern A - (A / B) * B, where A and B can be
// arbitrary expressions.
// It's not always easy, as A and B can be folded (imagine A is X / 2, and B
// is 4, so A / B becomes X / 8).
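// For example (illustrative operands): "%a urem 4" typically reaches here as
//   (%a + (-4 * (%a /u 4)))
// i.e. a two-operand add whose first operand is a mul; trying each mul
// operand (and its negation) as the divisor B and re-building the urem via
// getURemExpr recovers LHS = %a and RHS = 4.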
bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
                                const SCEV *&RHS) {
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
  return false;
}