//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs strength reduction on array references inside loops that
// have the loop induction variable as one or more of their components. It
// rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>.
// However, it's useful to think about these as the same register, with some
// uses using the value of the register before the add and some using it after.
// In this example, the icmp is a post-increment user, since it uses %i.next,
// which is the value of the induction variable after the increment. The other
// common case of post-increment users is users outside the loop.
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should the addressing mode BaseGV be changed to a ConstantExpr instead
//       of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
//       smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
//       multiple base registers, or as the increment expression in an addrec),
//       we may not actually need both reg and (-1 * reg) in registers; the
//       negation can be implemented by using a sub instead of an add. The
//       lack of support for taking this into consideration when making
//       register pressure decisions is partly worked around by the "Special"
//       use kind.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopStrengthReduce.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <map>
#include <numeric>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "loop-reduce"

/// MaxIVUsers is an arbitrary threshold that provides an early opportunity to
/// bail out. This threshold is far beyond the number of users that LSR can
/// conceivably solve, so it should not affect generated code, but catches the
/// worst cases before LSR burns too much compile time and stack space.
static const unsigned MaxIVUsers = 200;

// Temporary flag to clean up congruent phis after LSR phi expansion.
// It's currently disabled until we can determine whether it's truly useful or
// not. The flag should be removed after the v3.0 release.
// This is now needed for ivchains.
static cl::opt<bool> EnablePhiElim(
    "enable-lsr-phielim", cl::Hidden, cl::init(true),
    cl::desc("Enable LSR phi elimination"));

// The flag adds instruction count to the solutions cost comparison.
static cl::opt<bool> InsnsCost(
    "lsr-insns-cost", cl::Hidden, cl::init(true),
    cl::desc("Add instruction count to a LSR cost model"));

// Flag to choose how to narrow a complex LSR solution.
static cl::opt<bool> LSRExpNarrow(
    "lsr-exp-narrow", cl::Hidden, cl::init(false),
    cl::desc("Narrow LSR complex solution using"
             " expectation of registers number"));

// Flag to narrow search space by filtering non-optimal formulae with
// the same ScaledReg and Scale.
static cl::opt<bool> FilterSameScaledReg(
    "lsr-filter-same-scaled-reg", cl::Hidden, cl::init(true),
    cl::desc("Narrow LSR search space by filtering non-optimal formulae"
             " with the same ScaledReg and Scale"));

static cl::opt<TTI::AddressingModeKind> PreferredAddresingMode(
    "lsr-preferred-addressing-mode", cl::Hidden, cl::init(TTI::AMK_None),
    cl::desc("A flag that overrides the target's preferred addressing mode."),
    cl::values(clEnumValN(TTI::AMK_None,
                          "none",
                          "Don't prefer any addressing mode"),
               clEnumValN(TTI::AMK_PreIndexed,
                          "preindexed",
                          "Prefer pre-indexed addressing mode"),
               clEnumValN(TTI::AMK_PostIndexed,
                          "postindexed",
                          "Prefer post-indexed addressing mode")));

static cl::opt<unsigned> ComplexityLimit(
    "lsr-complexity-limit", cl::Hidden,
    cl::init(std::numeric_limits<uint16_t>::max()),
    cl::desc("LSR search space complexity limit"));

static cl::opt<unsigned> SetupCostDepthLimit(
    "lsr-setupcost-depth-limit", cl::Hidden, cl::init(7),
    cl::desc("The limit on recursion depth for LSRs setup cost"));

#ifndef NDEBUG
// Stress test IV chain generation.
static cl::opt<bool> StressIVChain(
    "stress-ivchain", cl::Hidden, cl::init(false),
    cl::desc("Stress test LSR IV chains"));
#else
static bool StressIVChain = false;
#endif

namespace {

struct MemAccessTy {
  /// Used in situations where the accessed memory type is unknown.
  static const unsigned UnknownAddressSpace =
      std::numeric_limits<unsigned>::max();

  Type *MemTy = nullptr;
  unsigned AddrSpace = UnknownAddressSpace;

  MemAccessTy() = default;
  MemAccessTy(Type *Ty, unsigned AS) : MemTy(Ty), AddrSpace(AS) {}

  bool operator==(MemAccessTy Other) const {
    return MemTy == Other.MemTy && AddrSpace == Other.AddrSpace;
  }

  bool operator!=(MemAccessTy Other) const { return !(*this == Other); }

  static MemAccessTy getUnknown(LLVMContext &Ctx,
                                unsigned AS = UnknownAddressSpace) {
    return MemAccessTy(Type::getVoidTy(Ctx), AS);
  }

  Type *getType() { return MemTy; }
};

/// This class holds data which is used to order reuse candidates.
class RegSortData {
public:
  /// This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

LLVM_DUMP_METHOD void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}
#endif

namespace {

/// Map register candidates to information about how they are used.
class RegUseTracker {
  using RegUsesTy = DenseMap<const SCEV *, RegSortData>;

  RegUsesTy RegUsesMap;
  SmallVector<const SCEV *, 16> RegSequence;

public:
  void countRegister(const SCEV *Reg, size_t LUIdx);
  void dropRegister(const SCEV *Reg, size_t LUIdx);
  void swapAndDropUse(size_t LUIdx, size_t LastLUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;

  void clear();

  using iterator = SmallVectorImpl<const SCEV *>::iterator;
  using const_iterator = SmallVectorImpl<const SCEV *>::const_iterator;

  iterator begin() { return RegSequence.begin(); }
  iterator end() { return RegSequence.end(); }
  const_iterator begin() const { return RegSequence.begin(); }
  const_iterator end() const { return RegSequence.end(); }
};

} // end anonymous namespace

void
RegUseTracker::countRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair =
      RegUsesMap.insert(std::make_pair(Reg, RegSortData()));
  RegSortData &RSD = Pair.first->second;
  if (Pair.second)
    RegSequence.push_back(Reg);
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);
}

void
RegUseTracker::dropRegister(const SCEV *Reg, size_t LUIdx) {
  RegUsesTy::iterator It = RegUsesMap.find(Reg);
  assert(It != RegUsesMap.end());
  RegSortData &RSD = It->second;
  assert(RSD.UsedByIndices.size() > LUIdx);
  RSD.UsedByIndices.reset(LUIdx);
}

void
RegUseTracker::swapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
  assert(LUIdx <= LastLUIdx);

  // Update RegUses. The data structure is not optimized for this purpose;
  // we must iterate through it and update each of the bit vectors.
  for (auto &Pair : RegUsesMap) {
    SmallBitVector &UsedByIndices = Pair.second.UsedByIndices;
    if (LUIdx < UsedByIndices.size())
      UsedByIndices[LUIdx] =
          LastLUIdx < UsedByIndices.size() ?
              UsedByIndices[LastLUIdx] : false;
    UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx));
  }
}

bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  if (I == RegUsesMap.end())
    return false;
  const SmallBitVector &UsedByIndices = I->second.UsedByIndices;
  int i = UsedByIndices.find_first();
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;
  return UsedByIndices.find_next(i) != -1;
}

const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  assert(I != RegUsesMap.end() && "Unknown register!");
  return I->second.UsedByIndices;
}

void RegUseTracker::clear() {
  RegUsesMap.clear();
  RegSequence.clear();
}

namespace {

/// This class holds information that describes a formula for computing a
/// value that satisfies a use. It may include broken-out immediates and
/// scaled registers.
struct Formula {
  /// Global base address used for complex addressing.
  GlobalValue *BaseGV = nullptr;

  /// Base offset for complex addressing.
  int64_t BaseOffset = 0;

  /// Whether any complex addressing has a base register.
  bool HasBaseReg = false;

  /// The scale of any complex addressing.
  int64_t Scale = 0;

  /// The list of "base" registers for this use. When this is non-empty, the
  /// canonical representation of a formula is
  /// 1. BaseRegs.size > 1 implies ScaledReg != NULL and
  /// 2. ScaledReg != NULL implies Scale != 1 || !BaseRegs.empty().
  /// 3. The reg containing the recurrent expr related to the current loop in
  ///    the formula should be put in the ScaledReg.
  /// #1 enforces that the scaled register is always used when at least two
  /// registers are needed by the formula: e.g., reg1 + reg2 is reg1 + 1 * reg2.
  /// #2 enforces that 1 * reg is reg.
  /// #3 ensures invariant regs with respect to the current loop can be
  ///    combined together in LSR codegen.
  /// This invariant can be temporarily broken while building a formula.
  /// However, every formula inserted into the LSRInstance must be in canonical
  /// form.
  SmallVector<const SCEV *, 4> BaseRegs;

  /// The 'scaled' register for this use. This should be non-null when Scale is
  /// not zero.
  const SCEV *ScaledReg = nullptr;

  /// An additional constant offset which is added near the use. This requires
  /// a temporary register, but the offset itself can live in an add immediate
  /// field rather than a register.
  int64_t UnfoldedOffset = 0;

  Formula() = default;

  void initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);

  bool isCanonical(const Loop &L) const;

  void canonicalize(const Loop &L);

  bool unscale();

  bool hasZeroEnd() const;

  size_t getNumRegs() const;
  Type *getType() const;

  void deleteBaseReg(const SCEV *&S);

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace

/// Recursion helper for initialMatch.
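/// An illustrative sketch (not exhaustive): for S = {%n,+,4}<%L>, where %n
/// properly dominates the loop header, the start %n lands in Good and the
/// remaining recurrence {0,+,4}<%L> lands in Bad.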
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE) {
  // Collect expressions which properly dominate the loop header.
  if (SE.properlyDominates(S, L->getHeader())) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (const SCEV *S : Add->operands())
      DoInitialMatch(S, L, Good, Bad, SE);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero() && AR->isAffine()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
      DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                      AR->getStepRecurrence(SE),
                                      // FIXME: AR->getNoWrapFlags()
                                      AR->getLoop(), SCEV::FlagAnyWrap),
                     L, Good, Bad, SE);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getOperand(0)->isAllOnesValue()) {
      SmallVector<const SCEV *, 4> Ops(drop_begin(Mul->operands()));
      const SCEV *NewMul = SE.getMulExpr(Ops);

      SmallVector<const SCEV *, 4> MyGood;
      SmallVector<const SCEV *, 4> MyBad;
      DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
      const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
          SE.getEffectiveSCEVType(NewMul->getType())));
      for (const SCEV *S : MyGood)
        Good.push_back(SE.getMulExpr(NegOne, S));
      for (const SCEV *S : MyBad)
        Bad.push_back(SE.getMulExpr(NegOne, S));
      return;
    }

  // Ok, we can't do anything interesting. Just stuff the whole thing into a
  // register and hope for the best.
  Bad.push_back(S);
}

/// Incorporate loop-variant parts of S into this Formula, attempting to keep
/// all loop-invariant and loop-computable values in a single base register.
void Formula::initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
  SmallVector<const SCEV *, 4> Good;
  SmallVector<const SCEV *, 4> Bad;
  DoInitialMatch(S, L, Good, Bad, SE);
  if (!Good.empty()) {
    const SCEV *Sum = SE.getAddExpr(Good);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    HasBaseReg = true;
  }
  if (!Bad.empty()) {
    const SCEV *Sum = SE.getAddExpr(Bad);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    HasBaseReg = true;
  }
  canonicalize(*L);
}

/// Check whether or not this formula satisfies the canonical
/// representation.
/// \see Formula::BaseRegs.
bool Formula::isCanonical(const Loop &L) const {
  if (!ScaledReg)
    return BaseRegs.size() <= 1;

  if (Scale != 1)
    return true;

  if (Scale == 1 && BaseRegs.empty())
    return false;

  const SCEVAddRecExpr *SAR = dyn_cast<const SCEVAddRecExpr>(ScaledReg);
  if (SAR && SAR->getLoop() == &L)
    return true;

  // If ScaledReg is not a recurrent expr, or it is one whose loop is not the
  // current loop, while BaseRegs contains a recurrent expr reg related to the
  // current loop, we want to swap the reg in BaseRegs with ScaledReg, so this
  // formula is not canonical.
  auto I = find_if(BaseRegs, [&](const SCEV *S) {
    return isa<const SCEVAddRecExpr>(S) &&
           (cast<SCEVAddRecExpr>(S)->getLoop() == &L);
  });
  return I == BaseRegs.end();
}

/// Helper method to morph a formula into its canonical representation.
/// \see Formula::BaseRegs.
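/// An illustrative example: reg1 + reg2 is canonicalized to reg1 + 1*reg2,
/// i.e. reg2 is popped out of BaseRegs into ScaledReg with Scale = 1.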
/// Every formula having more than one base register must use the ScaledReg
/// field. Otherwise, we would have to do special cases everywhere in LSR
/// to treat reg1 + reg2 + ... the same way as reg1 + 1*reg2 + ...
/// On the other hand, 1*reg should be canonicalized into reg.
void Formula::canonicalize(const Loop &L) {
  if (isCanonical(L))
    return;
  // So far we did not need this case. This is easy to implement but it is
  // useless to maintain dead code. Besides, it could hurt compile time.
  assert(!BaseRegs.empty() && "1*reg => reg, should not be needed.");

  // Keep the invariant sum in BaseRegs and one of the variant sums in
  // ScaledReg.
  if (!ScaledReg) {
    ScaledReg = BaseRegs.pop_back_val();
    Scale = 1;
  }

  // If ScaledReg is an invariant with respect to L, find the reg from
  // BaseRegs containing the recurrent expr related with Loop L. Swap the
  // reg with ScaledReg.
  const SCEVAddRecExpr *SAR = dyn_cast<const SCEVAddRecExpr>(ScaledReg);
  if (!SAR || SAR->getLoop() != &L) {
    auto I = find_if(BaseRegs, [&](const SCEV *S) {
      return isa<const SCEVAddRecExpr>(S) &&
             (cast<SCEVAddRecExpr>(S)->getLoop() == &L);
    });
    if (I != BaseRegs.end())
      std::swap(ScaledReg, *I);
  }
  assert(isCanonical(L) && "Failed to canonicalize?");
}

/// Get rid of the scale in the formula.
/// In other words, this method morphs reg1 + 1*reg2 into reg1 + reg2.
/// \return true if it was possible to get rid of the scale, false otherwise.
/// \note After this operation the formula may not be in the canonical form.
bool Formula::unscale() {
  if (Scale != 1)
    return false;
  Scale = 0;
  BaseRegs.push_back(ScaledReg);
  ScaledReg = nullptr;
  return true;
}

bool Formula::hasZeroEnd() const {
  if (UnfoldedOffset || BaseOffset)
    return false;
  if (BaseRegs.size() != 1 || ScaledReg)
    return false;
  return true;
}

/// Return the total number of register operands used by this formula. This
/// does not include register uses implied by non-constant addrec strides.
size_t Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}

/// Return the type of this formula, if it has one, or null otherwise. This
/// type is meaningless except for the bit size.
Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         BaseGV ? BaseGV->getType() :
         nullptr;
}

/// Delete the given base reg from the BaseRegs list.
void Formula::deleteBaseReg(const SCEV *&S) {
  if (&S != &BaseRegs.back())
    std::swap(S, BaseRegs.back());
  BaseRegs.pop_back();
}

/// Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg || is_contained(BaseRegs, S);
}

/// Test whether this formula uses registers which are used by uses other than
/// the use with the given index.
bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
  if (ScaledReg)
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
      return true;
  for (const SCEV *BaseReg : BaseRegs)
    if (RegUses.isRegUsedByUsesOtherThan(BaseReg, LUIdx))
      return true;
  return false;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Formula::print(raw_ostream &OS) const {
  bool First = true;
  if (BaseGV) {
    if (!First) OS << " + "; else First = false;
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
  }
  if (BaseOffset != 0) {
    if (!First) OS << " + "; else First = false;
    OS << BaseOffset;
  }
  for (const SCEV *BaseReg : BaseRegs) {
    if (!First) OS << " + "; else First = false;
    OS << "reg(" << *BaseReg << ')';
  }
  if (HasBaseReg && BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: HasBaseReg**";
  } else if (!HasBaseReg && !BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: !HasBaseReg**";
  }
  if (Scale != 0) {
    if (!First) OS << " + "; else First = false;
    OS << Scale << "*reg(";
    if (ScaledReg)
      OS << *ScaledReg;
    else
      OS << "<unknown>";
    OS << ')';
  }
  if (UnfoldedOffset != 0) {
    if (!First) OS << " + ";
    OS << "imm(" << UnfoldedOffset << ')';
  }
}

LLVM_DUMP_METHOD void Formula::dump() const {
  print(errs()); errs() << '\n';
}
#endif

/// Return true if the given addrec can be sign-extended without changing its
/// value.
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  Type *WideTy =
      IntegerType::get(SE.getContext(),
                       SE.getTypeSizeInBits(AR->getType()) + 1);
  return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}

/// Return true if the given add can be sign-extended without changing its
/// value.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
  Type *WideTy =
      IntegerType::get(SE.getContext(),
                       SE.getTypeSizeInBits(A->getType()) + 1);
  return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}

/// Return true if the given mul can be sign-extended without changing its
/// value.
static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
  Type *WideTy =
      IntegerType::get(SE.getContext(),
                       SE.getTypeSizeInBits(M->getType()) *
                           M->getNumOperands());
  return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
}

/// Return an expression for LHS /s RHS, if it can be determined and if the
/// remainder is known to be zero, or null otherwise. If IgnoreSignificantBits
/// is true, expressions like (X * Y) /s Y are simplified to X, ignoring that
/// the multiplication may overflow, which is useful when the result will be
/// used in a context where the most significant bits are ignored.
static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
                                ScalarEvolution &SE,
                                bool IgnoreSignificantBits = false) {
  // Handle the trivial case, which works for any SCEV type.
  if (LHS == RHS)
    return SE.getConstant(LHS->getType(), 1);

  // Handle a few RHS special cases.
  const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
  if (RC) {
    const APInt &RA = RC->getAPInt();
    // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
    // some folding.
    if (RA.isAllOnesValue())
      return SE.getMulExpr(LHS, RC);
    // Handle x /s 1 as x.
    if (RA == 1)
      return LHS;
  }

  // Check for a division of a constant by a constant.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
    if (!RC)
      return nullptr;
    const APInt &LA = C->getAPInt();
    const APInt &RA = RC->getAPInt();
    if (LA.srem(RA) != 0)
      return nullptr;
    return SE.getConstant(LA.sdiv(RA));
  }

  // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
    if ((IgnoreSignificantBits || isAddRecSExtable(AR, SE)) && AR->isAffine()) {
      const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
                                      IgnoreSignificantBits);
      if (!Step) return nullptr;
      const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
                                       IgnoreSignificantBits);
      if (!Start) return nullptr;
      // FlagNW is independent of the start value, step direction, and is
      // preserved with smaller magnitude steps.
      // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
      return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap);
    }
    return nullptr;
  }

  // Distribute the sdiv over add operands, if the add doesn't overflow.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
      SmallVector<const SCEV *, 8> Ops;
      for (const SCEV *S : Add->operands()) {
        const SCEV *Op = getExactSDiv(S, RHS, SE, IgnoreSignificantBits);
        if (!Op) return nullptr;
        Ops.push_back(Op);
      }
      return SE.getAddExpr(Ops);
    }
    return nullptr;
  }

  // Check for a multiply operand that we can pull RHS out of.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) {
    if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
      // Handle special case C1*X*Y /s C2*X*Y.
      if (const SCEVMulExpr *MulRHS = dyn_cast<SCEVMulExpr>(RHS)) {
        if (IgnoreSignificantBits || isMulSExtable(MulRHS, SE)) {
          const SCEVConstant *LC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
          const SCEVConstant *RC =
              dyn_cast<SCEVConstant>(MulRHS->getOperand(0));
          if (LC && RC) {
            SmallVector<const SCEV *, 4> LOps(drop_begin(Mul->operands()));
            SmallVector<const SCEV *, 4> ROps(drop_begin(MulRHS->operands()));
            if (LOps == ROps)
              return getExactSDiv(LC, RC, SE, IgnoreSignificantBits);
          }
        }
      }

      SmallVector<const SCEV *, 4> Ops;
      bool Found = false;
      for (const SCEV *S : Mul->operands()) {
        if (!Found)
          if (const SCEV *Q = getExactSDiv(S, RHS, SE,
                                           IgnoreSignificantBits)) {
            S = Q;
            Found = true;
          }
        Ops.push_back(S);
      }
      return Found ? SE.getMulExpr(Ops) : nullptr;
    }
    return nullptr;
  }

  // Otherwise we don't know.
  return nullptr;
}

/// If S involves the addition of a constant integer value, return that integer
/// value, and mutate S to point to a new SCEV with that value excluded.
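/// For example (illustrative): given S = (42 + %a), this returns 42 and
/// leaves S pointing at the SCEV for %a.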
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    if (C->getAPInt().getMinSignedBits() <= 64) {
      S = SE.getConstant(C->getType(), 0);
      return C->getValue()->getSExtValue();
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->operands());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->operands());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return 0;
}

/// If S involves the addition of a GlobalValue address, return that symbol,
/// and mutate S to point to a new SCEV with that value excluded.
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
      S = SE.getConstant(GV->getType(), 0);
      return GV;
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->operands());
    GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
    if (Result)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->operands());
    GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
    if (Result)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return nullptr;
}

/// Returns true if the specified instruction is using the specified value as
/// an address.
static bool isAddressUse(const TargetTransformInfo &TTI,
                         Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getPointerOperand() == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    case Intrinsic::memset:
    case Intrinsic::prefetch:
    case Intrinsic::masked_load:
      if (II->getArgOperand(0) == OperandVal)
        isAddress = true;
      break;
    case Intrinsic::masked_store:
      if (II->getArgOperand(1) == OperandVal)
        isAddress = true;
      break;
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      if (II->getArgOperand(0) == OperandVal ||
          II->getArgOperand(1) == OperandVal)
        isAddress = true;
      break;
    default: {
      MemIntrinsicInfo IntrInfo;
      if (TTI.getTgtMemIntrinsic(II, IntrInfo)) {
        if (IntrInfo.PtrVal == OperandVal)
          isAddress = true;
      }
    }
    }
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {
    if (RMW->getPointerOperand() == OperandVal)
      isAddress = true;
  } else if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
    if (CmpX->getPointerOperand() == OperandVal)
      isAddress = true;
  }
  return isAddress;
}

/// Return the type of the memory being accessed.
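/// For instance (illustrative): for `store i32 %v, i32* %p` this yields
/// MemTy = i32 with %p's address space.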
static MemAccessTy getAccessType(const TargetTransformInfo &TTI,
                                 Instruction *Inst, Value *OperandVal) {
  MemAccessTy AccessTy(Inst->getType(), MemAccessTy::UnknownAddressSpace);
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    AccessTy.MemTy = SI->getOperand(0)->getType();
    AccessTy.AddrSpace = SI->getPointerAddressSpace();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    AccessTy.AddrSpace = LI->getPointerAddressSpace();
  } else if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {
    AccessTy.AddrSpace = RMW->getPointerAddressSpace();
  } else if (const AtomicCmpXchgInst *CmpX =
                 dyn_cast<AtomicCmpXchgInst>(Inst)) {
    AccessTy.AddrSpace = CmpX->getPointerAddressSpace();
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::prefetch:
    case Intrinsic::memset:
      AccessTy.AddrSpace =
          II->getArgOperand(0)->getType()->getPointerAddressSpace();
      AccessTy.MemTy = OperandVal->getType();
      break;
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      AccessTy.AddrSpace = OperandVal->getType()->getPointerAddressSpace();
      AccessTy.MemTy = OperandVal->getType();
      break;
    case Intrinsic::masked_load:
      AccessTy.AddrSpace =
          II->getArgOperand(0)->getType()->getPointerAddressSpace();
      break;
    case Intrinsic::masked_store:
      AccessTy.MemTy = II->getOperand(0)->getType();
      AccessTy.AddrSpace =
          II->getArgOperand(1)->getType()->getPointerAddressSpace();
      break;
    default: {
      MemIntrinsicInfo IntrInfo;
      if (TTI.getTgtMemIntrinsic(II, IntrInfo) && IntrInfo.PtrVal) {
        AccessTy.AddrSpace
            = IntrInfo.PtrVal->getType()->getPointerAddressSpace();
      }

      break;
    }
    }
  }

  // All pointers have the same requirements, so canonicalize them to an
  // arbitrary pointer type to minimize variation.
  if (PointerType *PTy = dyn_cast<PointerType>(AccessTy.MemTy))
    AccessTy.MemTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
                                      PTy->getAddressSpace());

  return AccessTy;
}

/// Return true if this AddRec is already a phi in its loop.
static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  for (PHINode &PN : AR->getLoop()->getHeader()->phis()) {
    if (SE.isSCEVable(PN.getType()) &&
        (SE.getEffectiveSCEVType(PN.getType()) ==
         SE.getEffectiveSCEVType(AR->getType())) &&
        SE.getSCEV(&PN) == AR)
      return true;
  }
  return false;
}

/// Check if expanding this expression is likely to incur significant cost.
/// This is tricky because SCEV doesn't track which expressions are actually
/// computed by the current IR.
///
/// We currently allow expansion of IV increments that involve adds,
/// multiplication by constants, and AddRecs from existing phis.
///
/// TODO: Allow UDivExpr if we can find an existing IV increment that is an
/// obvious multiple of the UDivExpr.
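///
/// Illustrative examples: {0,+,4}<%L> is cheap when %L already has a matching
/// phi (see isExistingPhi above), whereas a UDivExpr is currently always
/// treated as high cost.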
static bool isHighCostExpansion(const SCEV *S,
                                SmallPtrSetImpl<const SCEV*> &Processed,
                                ScalarEvolution &SE) {
  // Zero/One operand expressions
  switch (S->getSCEVType()) {
  case scUnknown:
  case scConstant:
    return false;
  case scTruncate:
    return isHighCostExpansion(cast<SCEVTruncateExpr>(S)->getOperand(),
                               Processed, SE);
  case scZeroExtend:
    return isHighCostExpansion(cast<SCEVZeroExtendExpr>(S)->getOperand(),
                               Processed, SE);
  case scSignExtend:
    return isHighCostExpansion(cast<SCEVSignExtendExpr>(S)->getOperand(),
                               Processed, SE);
  default:
    break;
  }

  if (!Processed.insert(S).second)
    return false;

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (const SCEV *S : Add->operands()) {
      if (isHighCostExpansion(S, Processed, SE))
        return true;
    }
    return false;
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    if (Mul->getNumOperands() == 2) {
      // Multiplication by a constant is ok
      if (isa<SCEVConstant>(Mul->getOperand(0)))
        return isHighCostExpansion(Mul->getOperand(1), Processed, SE);

      // If we have the value of one operand, check if an existing
      // multiplication already generates this expression.
      if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Mul->getOperand(1))) {
        Value *UVal = U->getValue();
        for (User *UR : UVal->users()) {
          // If U is a constant, it may be used by a ConstantExpr.
          Instruction *UI = dyn_cast<Instruction>(UR);
          if (UI && UI->getOpcode() == Instruction::Mul &&
              SE.isSCEVable(UI->getType())) {
            return SE.getSCEV(UI) == Mul;
          }
        }
      }
    }
  }

  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    if (isExistingPhi(AR, SE))
      return false;
  }

  // For now, consider any other type of expression (div/mul/min/max) high
  // cost.
  return true;
}

namespace {

class LSRUse;

} // end anonymous namespace

/// Check if the addressing mode defined by \p F is completely
/// folded in \p LU at isel time.
/// This includes address-mode folding and special icmp tricks.
/// This function returns true if \p LU can accommodate what \p F
/// defines and up to 1 base + 1 scaled + offset.
/// In other words, if \p F has several base registers, this function may
/// still return true. Therefore, users still need to account for
/// additional base registers and/or unfolded offsets to derive an
/// accurate cost model.
static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 const LSRUse &LU, const Formula &F);

// Get the cost of the scaling factor used in F for LU.
static InstructionCost getScalingFactorCost(const TargetTransformInfo &TTI,
                                            const LSRUse &LU, const Formula &F,
                                            const Loop &L);

namespace {

/// This class is used to measure and compare candidate formulae.
class Cost {
  const Loop *L = nullptr;
  ScalarEvolution *SE = nullptr;
  const TargetTransformInfo *TTI = nullptr;
  TargetTransformInfo::LSRCost C;
  TTI::AddressingModeKind AMK = TTI::AMK_None;

public:
  Cost() = delete;
  Cost(const Loop *L, ScalarEvolution &SE, const TargetTransformInfo &TTI,
       TTI::AddressingModeKind AMK) :
    L(L), SE(&SE), TTI(&TTI), AMK(AMK) {
    C.Insns = 0;
    C.NumRegs = 0;
    C.AddRecCost = 0;
    C.NumIVMuls = 0;
    C.NumBaseAdds = 0;
    C.ImmCost = 0;
    C.SetupCost = 0;
    C.ScaleCost = 0;
  }

  bool isLess(Cost &Other);

  void Lose();

#ifndef NDEBUG
  // Once any of the metrics loses, they must all remain losers.
  bool isValid() {
    return ((C.Insns | C.NumRegs | C.AddRecCost | C.NumIVMuls | C.NumBaseAdds
             | C.ImmCost | C.SetupCost | C.ScaleCost) != ~0u)
        || ((C.Insns & C.NumRegs & C.AddRecCost & C.NumIVMuls & C.NumBaseAdds
             & C.ImmCost & C.SetupCost & C.ScaleCost) == ~0u);
  }
#endif

  bool isLoser() {
    assert(isValid() && "invalid cost");
    return C.NumRegs == ~0u;
  }

  void RateFormula(const Formula &F,
                   SmallPtrSetImpl<const SCEV *> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs,
                   const LSRUse &LU,
                   SmallPtrSetImpl<const SCEV *> *LoserRegs = nullptr);

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  void RateRegister(const Formula &F, const SCEV *Reg,
                    SmallPtrSetImpl<const SCEV *> &Regs);
  void RatePrimaryRegister(const Formula &F, const SCEV *Reg,
                           SmallPtrSetImpl<const SCEV *> &Regs,
                           SmallPtrSetImpl<const SCEV *> *LoserRegs);
};

/// An operand value in an instruction which is to be replaced with some
/// equivalent, possibly strength-reduced, replacement.
struct LSRFixup {
  /// The instruction which will be updated.
  Instruction *UserInst = nullptr;

  /// The operand of the instruction which will be replaced. The operand may be
  /// used more than once; every instance will be replaced.
  Value *OperandValToReplace = nullptr;

  /// If this user is to use the post-incremented value of an induction
  /// variable, this set is non-empty and holds the loops associated with the
  /// induction variable.
  PostIncLoopSet PostIncLoops;

  /// A constant offset to be added to the LSRUse expression. This allows
  /// multiple fixups to share the same LSRUse with different offsets, for
  /// example in an unrolled loop.
  int64_t Offset = 0;

  LSRFixup() = default;

  bool isUseFullyOutsideLoop(const Loop *L) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

/// A DenseMapInfo implementation for holding DenseMaps and DenseSets of sorted
/// SmallVectors of const SCEV*.
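/// (Illustrative note: LSRUse's Uniquifier below keys each Formula by its
/// sorted register set, {ScaledReg} + BaseRegs, so two formulae with the same
/// registers map to the same key.)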
struct UniquifierDenseMapInfo {
  static SmallVector<const SCEV *, 4> getEmptyKey() {
    SmallVector<const SCEV *, 4> V;
    V.push_back(reinterpret_cast<const SCEV *>(-1));
    return V;
  }

  static SmallVector<const SCEV *, 4> getTombstoneKey() {
    SmallVector<const SCEV *, 4> V;
    V.push_back(reinterpret_cast<const SCEV *>(-2));
    return V;
  }

  static unsigned getHashValue(const SmallVector<const SCEV *, 4> &V) {
    return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
  }

  static bool isEqual(const SmallVector<const SCEV *, 4> &LHS,
                      const SmallVector<const SCEV *, 4> &RHS) {
    return LHS == RHS;
  }
};

/// This class holds the state that LSR keeps for each use in IVUsers, as well
/// as uses invented by LSR itself. It includes information about what kinds of
/// things can be folded into the user, information about the user itself, and
/// information about how the use may be satisfied. TODO: Represent multiple
/// users of the same expression in common?
class LSRUse {
  DenseSet<SmallVector<const SCEV *, 4>, UniquifierDenseMapInfo> Uniquifier;

public:
  /// An enum for a kind of use, indicating what types of scaled and immediate
  /// operands it might support.
  enum KindType {
    Basic,    ///< A normal use, with no folding.
    Special,  ///< A special case of basic, allowing -1 scales.
    Address,  ///< An address use; folding according to TargetLowering
    ICmpZero  ///< An equality icmp with both operands folded into one.
    // TODO: Add a generic icmp too?
  };

  using SCEVUseKindPair = PointerIntPair<const SCEV *, 2, KindType>;

  KindType Kind;
  MemAccessTy AccessTy;

  /// The list of operands which are to be replaced.
  SmallVector<LSRFixup, 8> Fixups;

  /// Keep track of the min and max offsets of the fixups.
  int64_t MinOffset = std::numeric_limits<int64_t>::max();
  int64_t MaxOffset = std::numeric_limits<int64_t>::min();

  /// This records whether all of the fixups using this LSRUse are outside of
  /// the loop, in which case some special-case heuristics may be used.
  bool AllFixupsOutsideLoop = true;

  /// RigidFormula is set to true to guarantee that this use will be associated
  /// with a single formula--the one that initially matched. Some SCEV
  /// expressions cannot be expanded. This allows LSR to consider the registers
  /// used by those expressions without the need to expand them later after
  /// changing the formula.
  bool RigidFormula = false;

  /// This records the widest use type for any fixup using this
  /// LSRUse. FindUseWithSimilarFormula can't consider uses with different max
  /// fixup widths to be equivalent, because the narrower one may be relying on
  /// the implicit truncation to truncate away bogus bits.
  Type *WidestFixupType = nullptr;

  /// A list of ways to build a value that can satisfy this user. After the
  /// list is populated, one of these is selected heuristically and used to
  /// formulate a replacement for OperandValToReplace in UserInst.
  SmallVector<Formula, 12> Formulae;

  /// The set of register candidates used by all formulae in this LSRUse.
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, MemAccessTy AT) : Kind(K), AccessTy(AT) {}

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());
    return Fixups.back();
  }

  void pushFixup(LSRFixup &f) {
    Fixups.push_back(f);
    if (f.Offset > MaxOffset)
      MaxOffset = f.Offset;
    if (f.Offset < MinOffset)
      MinOffset = f.Offset;
  }

  bool HasFormulaWithSameRegs(const Formula &F) const;
  float getNotSelectedProbability(const SCEV *Reg) const;
  bool InsertFormula(const Formula &F, const Loop &L);
  void DeleteFormula(Formula &F);
  void RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses);

  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace

static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
                                 GlobalValue *BaseGV, int64_t BaseOffset,
                                 bool HasBaseReg, int64_t Scale,
                                 Instruction *Fixup = nullptr);

static unsigned getSetupCost(const SCEV *Reg, unsigned Depth) {
  if (isa<SCEVUnknown>(Reg) || isa<SCEVConstant>(Reg))
    return 1;
  if (Depth == 0)
    return 0;
  if (const auto *S = dyn_cast<SCEVAddRecExpr>(Reg))
    return getSetupCost(S->getStart(), Depth - 1);
  if (auto S = dyn_cast<SCEVIntegralCastExpr>(Reg))
    return getSetupCost(S->getOperand(), Depth - 1);
  if (auto S = dyn_cast<SCEVNAryExpr>(Reg))
    return std::accumulate(S->op_begin(), S->op_end(), 0,
                           [&](unsigned i, const SCEV *Reg) {
                             return i + getSetupCost(Reg, Depth - 1);
                           });
  if (auto S = dyn_cast<SCEVUDivExpr>(Reg))
    return getSetupCost(S->getLHS(), Depth - 1) +
           getSetupCost(S->getRHS(), Depth - 1);
  return 0;
}

/// Tally up interesting quantities from the given register.
void Cost::RateRegister(const Formula &F, const SCEV *Reg,
                        SmallPtrSetImpl<const SCEV *> &Regs) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    // If this is an addrec for another loop, it should be an invariant
    // with respect to L since L is the innermost loop (at least
    // for now LSR only handles innermost loops).
    if (AR->getLoop() != L) {
      // If the AddRec exists, consider its register free and leave it alone.
      if (isExistingPhi(AR, *SE) && AMK != TTI::AMK_PostIndexed)
        return;

      // It is bad to allow LSR for the current loop to add induction
      // variables for its sibling loops.
      if (!AR->getLoop()->contains(L)) {
        Lose();
        return;
      }

      // Otherwise, it will be an invariant with respect to Loop L.
      ++C.NumRegs;
      return;
    }

    unsigned LoopCost = 1;
    if (TTI->isIndexedLoadLegal(TTI->MIM_PostInc, AR->getType()) ||
        TTI->isIndexedStoreLegal(TTI->MIM_PostInc, AR->getType())) {

      // If the step size matches the base offset, we could use pre-indexed
      // addressing.
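      // (Illustrative, not from the original source: on a target with
      // pre-indexed forms, e.g. AArch64's "ldr x0, [x1, #8]!", the +8
      // pointer update folds into the memory access itself.)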
      if (AMK == TTI::AMK_PreIndexed) {
        if (auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE)))
          if (Step->getAPInt() == F.BaseOffset)
            LoopCost = 0;
      } else if (AMK == TTI::AMK_PostIndexed) {
        const SCEV *LoopStep = AR->getStepRecurrence(*SE);
        if (isa<SCEVConstant>(LoopStep)) {
          const SCEV *LoopStart = AR->getStart();
          if (!isa<SCEVConstant>(LoopStart) &&
              SE->isLoopInvariant(LoopStart, L))
            LoopCost = 0;
        }
      }
    }
    C.AddRecCost += LoopCost;

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1))) {
      if (!Regs.count(AR->getOperand(1))) {
        RateRegister(F, AR->getOperand(1), Regs);
        if (isLoser())
          return;
      }
    }
  }
  ++C.NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
  C.SetupCost += getSetupCost(Reg, SetupCostDepthLimit);
  // Ensure we don't, even with the recursion limit, produce invalid costs.
  C.SetupCost = std::min<unsigned>(C.SetupCost, 1 << 16);

  C.NumIVMuls += isa<SCEVMulExpr>(Reg) &&
                 SE->hasComputableLoopEvolution(Reg, L);
}

/// Record this register in the set. If we haven't seen it before, rate
/// it. Optional LoserRegs provides a way to declare any formula that refers to
/// one of those regs an instant loser.
void Cost::RatePrimaryRegister(const Formula &F, const SCEV *Reg,
                               SmallPtrSetImpl<const SCEV *> &Regs,
                               SmallPtrSetImpl<const SCEV *> *LoserRegs) {
  if (LoserRegs && LoserRegs->count(Reg)) {
    Lose();
    return;
  }
  if (Regs.insert(Reg).second) {
    RateRegister(F, Reg, Regs);
    if (LoserRegs && isLoser())
      LoserRegs->insert(Reg);
  }
}

void Cost::RateFormula(const Formula &F,
                       SmallPtrSetImpl<const SCEV *> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const LSRUse &LU,
                       SmallPtrSetImpl<const SCEV *> *LoserRegs) {
  assert(F.isCanonical(*L) && "Cost is accurate only for canonical formula");
  // Tally up the registers.
  unsigned PrevAddRecCost = C.AddRecCost;
  unsigned PrevNumRegs = C.NumRegs;
  unsigned PrevNumBaseAdds = C.NumBaseAdds;
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Lose();
      return;
    }
    RatePrimaryRegister(F, ScaledReg, Regs, LoserRegs);
    if (isLoser())
      return;
  }
  for (const SCEV *BaseReg : F.BaseRegs) {
    if (VisitedRegs.count(BaseReg)) {
      Lose();
      return;
    }
    RatePrimaryRegister(F, BaseReg, Regs, LoserRegs);
    if (isLoser())
      return;
  }

  // Determine how many (unfolded) adds we'll need inside the loop.
  size_t NumBaseParts = F.getNumRegs();
  if (NumBaseParts > 1)
    // Do not count the base and a possible second register if the target
    // allows folding two registers.
    C.NumBaseAdds +=
        NumBaseParts - (1 + (F.Scale && isAMCompletelyFolded(*TTI, LU, F)));
  C.NumBaseAdds += (F.UnfoldedOffset != 0);

  // Accumulate non-free scaling amounts.
  C.ScaleCost += *getScalingFactorCost(*TTI, LU, F, *L).getValue();

  // Tally up the non-zero immediates.
  for (const LSRFixup &Fixup : LU.Fixups) {
    int64_t O = Fixup.Offset;
    int64_t Offset = (uint64_t)O + F.BaseOffset;
    if (F.BaseGV)
      C.ImmCost += 64; // Handle symbolic values conservatively.
                       // TODO: This should probably be the pointer size.
    else if (Offset != 0)
      C.ImmCost += APInt(64, Offset, true).getMinSignedBits();

    // Check with target if this offset with this instruction is
    // specifically not supported.
    if (LU.Kind == LSRUse::Address && Offset != 0 &&
        !isAMCompletelyFolded(*TTI, LSRUse::Address, LU.AccessTy, F.BaseGV,
                              Offset, F.HasBaseReg, F.Scale, Fixup.UserInst))
      C.NumBaseAdds++;
  }

  // If we don't count instruction cost, exit here.
  if (!InsnsCost) {
    assert(isValid() && "invalid cost");
    return;
  }

  // Treat every new register that exceeds TTI.getNumberOfRegisters() - 1 as
  // an additional instruction (at least fill).
  // TODO: Need to distinguish register classes?
  unsigned TTIRegNum = TTI->getNumberOfRegisters(
      TTI->getRegisterClassForType(false, F.getType())) - 1;
  if (C.NumRegs > TTIRegNum) {
    // If the cost already exceeded TTIRegNum, then only newly added registers
    // can add new instructions.
    if (PrevNumRegs > TTIRegNum)
      C.Insns += (C.NumRegs - PrevNumRegs);
    else
      C.Insns += (C.NumRegs - TTIRegNum);
  }

  // If the ICmpZero formula doesn't end with 0, it could not be replaced by
  // just an add or sub. We'll need to compare the final result of the AddRec.
  // That means we'll need an additional instruction. But if the target can
  // macro-fuse a compare with a branch, don't count this extra instruction.
  // For -10 + {0, +, 1}:
  // i = i + 1;
  // cmp i, 10
  //
  // For {-10, +, 1}:
  // i = i + 1;
  if (LU.Kind == LSRUse::ICmpZero && !F.hasZeroEnd() &&
      !TTI->canMacroFuseCmp())
    C.Insns++;
  // Each new AddRec adds 1 instruction to calculation.
  C.Insns += (C.AddRecCost - PrevAddRecCost);

  // BaseAdds adds instructions for unfolded registers.
  if (LU.Kind != LSRUse::ICmpZero)
    C.Insns += C.NumBaseAdds - PrevNumBaseAdds;
  assert(isValid() && "invalid cost");
}

/// Set this cost to a losing value.
void Cost::Lose() {
  C.Insns = std::numeric_limits<unsigned>::max();
  C.NumRegs = std::numeric_limits<unsigned>::max();
  C.AddRecCost = std::numeric_limits<unsigned>::max();
  C.NumIVMuls = std::numeric_limits<unsigned>::max();
  C.NumBaseAdds = std::numeric_limits<unsigned>::max();
  C.ImmCost = std::numeric_limits<unsigned>::max();
  C.SetupCost = std::numeric_limits<unsigned>::max();
  C.ScaleCost = std::numeric_limits<unsigned>::max();
}

/// Choose the lower cost.
bool Cost::isLess(Cost &Other) {
  if (InsnsCost.getNumOccurrences() > 0 && InsnsCost &&
      C.Insns != Other.C.Insns)
    return C.Insns < Other.C.Insns;
  return TTI->isLSRCostLess(C, Other.C);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Cost::print(raw_ostream &OS) const {
  if (InsnsCost)
    OS << C.Insns << " instruction" << (C.Insns == 1 ? " " : "s ");
  OS << C.NumRegs << " reg" << (C.NumRegs == 1 ? "" : "s");
  if (C.AddRecCost != 0)
    OS << ", with addrec cost " << C.AddRecCost;
  if (C.NumIVMuls != 0)
    OS << ", plus " << C.NumIVMuls << " IV mul"
       << (C.NumIVMuls == 1 ? "" : "s");
  if (C.NumBaseAdds != 0)
    OS << ", plus " << C.NumBaseAdds << " base add"
       << (C.NumBaseAdds == 1 ?
"" : "s"); 1474 if (C.ScaleCost != 0) 1475 OS << ", plus " << C.ScaleCost << " scale cost"; 1476 if (C.ImmCost != 0) 1477 OS << ", plus " << C.ImmCost << " imm cost"; 1478 if (C.SetupCost != 0) 1479 OS << ", plus " << C.SetupCost << " setup cost"; 1480 } 1481 1482 LLVM_DUMP_METHOD void Cost::dump() const { 1483 print(errs()); errs() << '\n'; 1484 } 1485 #endif 1486 1487 /// Test whether this fixup always uses its value outside of the given loop. 1488 bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const { 1489 // PHI nodes use their value in their incoming blocks. 1490 if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) { 1491 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 1492 if (PN->getIncomingValue(i) == OperandValToReplace && 1493 L->contains(PN->getIncomingBlock(i))) 1494 return false; 1495 return true; 1496 } 1497 1498 return !L->contains(UserInst); 1499 } 1500 1501 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1502 void LSRFixup::print(raw_ostream &OS) const { 1503 OS << "UserInst="; 1504 // Store is common and interesting enough to be worth special-casing. 1505 if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) { 1506 OS << "store "; 1507 Store->getOperand(0)->printAsOperand(OS, /*PrintType=*/false); 1508 } else if (UserInst->getType()->isVoidTy()) 1509 OS << UserInst->getOpcodeName(); 1510 else 1511 UserInst->printAsOperand(OS, /*PrintType=*/false); 1512 1513 OS << ", OperandValToReplace="; 1514 OperandValToReplace->printAsOperand(OS, /*PrintType=*/false); 1515 1516 for (const Loop *PIL : PostIncLoops) { 1517 OS << ", PostIncLoop="; 1518 PIL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 1519 } 1520 1521 if (Offset != 0) 1522 OS << ", Offset=" << Offset; 1523 } 1524 1525 LLVM_DUMP_METHOD void LSRFixup::dump() const { 1526 print(errs()); errs() << '\n'; 1527 } 1528 #endif 1529 1530 /// Test whether this use as a formula which has the same registers as the given 1531 /// formula. 1532 bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const { 1533 SmallVector<const SCEV *, 4> Key = F.BaseRegs; 1534 if (F.ScaledReg) Key.push_back(F.ScaledReg); 1535 // Unstable sort by host order ok, because this is only used for uniquifying. 1536 llvm::sort(Key); 1537 return Uniquifier.count(Key); 1538 } 1539 1540 /// The function returns a probability of selecting formula without Reg. 1541 float LSRUse::getNotSelectedProbability(const SCEV *Reg) const { 1542 unsigned FNum = 0; 1543 for (const Formula &F : Formulae) 1544 if (F.referencesReg(Reg)) 1545 FNum++; 1546 return ((float)(Formulae.size() - FNum)) / Formulae.size(); 1547 } 1548 1549 /// If the given formula has not yet been inserted, add it to the list, and 1550 /// return true. Return false otherwise. The formula must be in canonical form. 1551 bool LSRUse::InsertFormula(const Formula &F, const Loop &L) { 1552 assert(F.isCanonical(L) && "Invalid canonical representation"); 1553 1554 if (!Formulae.empty() && RigidFormula) 1555 return false; 1556 1557 SmallVector<const SCEV *, 4> Key = F.BaseRegs; 1558 if (F.ScaledReg) Key.push_back(F.ScaledReg); 1559 // Unstable sort by host order ok, because this is only used for uniquifying. 1560 llvm::sort(Key); 1561 1562 if (!Uniquifier.insert(Key).second) 1563 return false; 1564 1565 // Using a register to hold the value of 0 is not profitable. 
/// If the given formula has not yet been inserted, add it to the list, and
/// return true. Return false otherwise. The formula must be in canonical form.
bool LSRUse::InsertFormula(const Formula &F, const Loop &L) {
  assert(F.isCanonical(L) && "Invalid canonical representation");

  if (!Formulae.empty() && RigidFormula)
    return false;

  SmallVector<const SCEV *, 4> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  llvm::sort(Key);

  if (!Uniquifier.insert(Key).second)
    return false;

  // Using a register to hold the value of 0 is not profitable.
  assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
         "Zero allocated in a scaled register!");
#ifndef NDEBUG
  for (const SCEV *BaseReg : F.BaseRegs)
    assert(!BaseReg->isZero() && "Zero allocated in a base register!");
#endif

  // Add the formula to the list.
  Formulae.push_back(F);

  // Record registers now being used by this use.
  Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
  if (F.ScaledReg)
    Regs.insert(F.ScaledReg);

  return true;
}

/// Remove the given formula from this use's list.
void LSRUse::DeleteFormula(Formula &F) {
  if (&F != &Formulae.back())
    std::swap(F, Formulae.back());
  Formulae.pop_back();
}

/// Recompute the Regs field, and update RegUses.
void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
  // Now that we've filtered out some formulae, recompute the Regs set.
  SmallPtrSet<const SCEV *, 4> OldRegs = std::move(Regs);
  Regs.clear();
  for (const Formula &F : Formulae) {
    if (F.ScaledReg) Regs.insert(F.ScaledReg);
    Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
  }

  // Update the RegTracker.
  for (const SCEV *S : OldRegs)
    if (!Regs.count(S))
      RegUses.dropRegister(S, LUIdx);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRUse::print(raw_ostream &OS) const {
  OS << "LSR Use: Kind=";
  switch (Kind) {
  case Basic:    OS << "Basic"; break;
  case Special:  OS << "Special"; break;
  case ICmpZero: OS << "ICmpZero"; break;
  case Address:
    OS << "Address of ";
    if (AccessTy.MemTy->isPointerTy())
      OS << "pointer"; // the full pointer type could be really verbose
    else {
      OS << *AccessTy.MemTy;
    }

    OS << " in addrspace(" << AccessTy.AddrSpace << ')';
  }

  OS << ", Offsets={";
  bool NeedComma = false;
  for (const LSRFixup &Fixup : Fixups) {
    if (NeedComma) OS << ',';
    OS << Fixup.Offset;
    NeedComma = true;
  }
  OS << '}';

  if (AllFixupsOutsideLoop)
    OS << ", all-fixups-outside-loop";

  if (WidestFixupType)
    OS << ", widest fixup type: " << *WidestFixupType;
}

LLVM_DUMP_METHOD void LSRUse::dump() const {
  print(errs()); errs() << '\n';
}
#endif

static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
                                 GlobalValue *BaseGV, int64_t BaseOffset,
                                 bool HasBaseReg, int64_t Scale,
                                 Instruction *Fixup /*= nullptr*/) {
  switch (Kind) {
  case LSRUse::Address:
    return TTI.isLegalAddressingMode(AccessTy.MemTy, BaseGV, BaseOffset,
                                     HasBaseReg, Scale, AccessTy.AddrSpace,
                                     Fixup);

  case LSRUse::ICmpZero:
    // There's not even a target hook for querying whether it would be legal to
    // fold a GV into an ICmp.
    if (BaseGV)
      return false;

    // ICmp only has two operands; don't allow more than two non-trivial parts.
    if (Scale != 0 && HasBaseReg && BaseOffset != 0)
      return false;

    // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale
    // by putting the scaled register in the other operand of the icmp.
    if (Scale != 0 && Scale != -1)
      return false;

    // If we have low-level target information, ask the target if it can fold
    // an integer immediate on an icmp.
    if (BaseOffset != 0) {
      // We have one of:
      // ICmpZero     BaseReg + BaseOffset => ICmp BaseReg, -BaseOffset
      // ICmpZero -1*ScaleReg + BaseOffset => ICmp ScaleReg,  BaseOffset
      // BaseOffset is the ICmp immediate.
      if (Scale == 0)
        // The cast does the right thing with
        // std::numeric_limits<int64_t>::min().
        BaseOffset = -(uint64_t)BaseOffset;
      return TTI.isLegalICmpImmediate(BaseOffset);
    }

    // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg
    return true;

  case LSRUse::Basic:
    // Only handle single-register values.
    return !BaseGV && Scale == 0 && BaseOffset == 0;

  case LSRUse::Special:
    // Special case Basic to handle -1 scales.
    return !BaseGV && (Scale == 0 || Scale == -1) && BaseOffset == 0;
  }

  llvm_unreachable("Invalid LSRUse Kind!");
}
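// The following overload checks that the addressing mode stays legal over the
// use's entire offset range. Its wrap test works like this (illustrative
// example): if BaseOffset is INT64_MAX and MinOffset is 1, the unsigned sum
// wraps around to INT64_MIN, which is not greater than BaseOffset even though
// MinOffset > 0, so the combination is rejected as an overflow.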
static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 int64_t MinOffset, int64_t MaxOffset,
                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
                                 GlobalValue *BaseGV, int64_t BaseOffset,
                                 bool HasBaseReg, int64_t Scale) {
  // Check for overflow.
  if (((int64_t)((uint64_t)BaseOffset + MinOffset) > BaseOffset) !=
      (MinOffset > 0))
    return false;
  MinOffset = (uint64_t)BaseOffset + MinOffset;
  if (((int64_t)((uint64_t)BaseOffset + MaxOffset) > BaseOffset) !=
      (MaxOffset > 0))
    return false;
  MaxOffset = (uint64_t)BaseOffset + MaxOffset;

  return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MinOffset,
                              HasBaseReg, Scale) &&
         isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MaxOffset,
                              HasBaseReg, Scale);
}

static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 int64_t MinOffset, int64_t MaxOffset,
                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
                                 const Formula &F, const Loop &L) {
  // For the purpose of isAMCompletelyFolded, either having a canonical formula
  // or a scale not equal to zero is correct.
  // Problems may arise from non-canonical formulae having a scale == 0.
  // Strictly speaking, it would be best to just rely on canonical formulae.
  // However, when we generate the scaled formulae, we first check that the
  // scaling factor is profitable before computing the actual ScaledReg, for
  // compile time's sake.
  assert((F.isCanonical(L) || F.Scale != 0));
  return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy,
                              F.BaseGV, F.BaseOffset, F.HasBaseReg, F.Scale);
}

/// Test whether we know how to expand the current formula.
static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
                       int64_t MaxOffset, LSRUse::KindType Kind,
                       MemAccessTy AccessTy, GlobalValue *BaseGV,
                       int64_t BaseOffset, bool HasBaseReg, int64_t Scale) {
  // We know how to expand completely foldable formulae.
  return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV,
                              BaseOffset, HasBaseReg, Scale) ||
         // Or formulae that use a base register produced by a sum of base
         // registers.
         (Scale == 1 &&
          isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy,
                               BaseGV, BaseOffset, true, 0));
}

static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
                       int64_t MaxOffset, LSRUse::KindType Kind,
                       MemAccessTy AccessTy, const Formula &F) {
  return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, F.BaseGV,
                    F.BaseOffset, F.HasBaseReg, F.Scale);
}

static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 const LSRUse &LU, const Formula &F) {
  // Target may want to look at the user instructions.
  if (LU.Kind == LSRUse::Address && TTI.LSRWithInstrQueries()) {
    for (const LSRFixup &Fixup : LU.Fixups)
      if (!isAMCompletelyFolded(TTI, LSRUse::Address, LU.AccessTy, F.BaseGV,
                                (F.BaseOffset + Fixup.Offset), F.HasBaseReg,
                                F.Scale, Fixup.UserInst))
        return false;
    return true;
  }

  return isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind,
                              LU.AccessTy, F.BaseGV, F.BaseOffset, F.HasBaseReg,
                              F.Scale);
}

static InstructionCost getScalingFactorCost(const TargetTransformInfo &TTI,
                                            const LSRUse &LU, const Formula &F,
                                            const Loop &L) {
  if (!F.Scale)
    return 0;

  // If the use is not completely folded in that instruction, we will have to
  // pay an extra cost only for scale != 1.
  if (!isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind,
                            LU.AccessTy, F, L))
    return F.Scale != 1;

  switch (LU.Kind) {
  case LSRUse::Address: {
    // Check the scaling factor cost with both the min and max offsets.
    InstructionCost ScaleCostMinOffset = TTI.getScalingFactorCost(
        LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MinOffset, F.HasBaseReg,
        F.Scale, LU.AccessTy.AddrSpace);
    InstructionCost ScaleCostMaxOffset = TTI.getScalingFactorCost(
        LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MaxOffset, F.HasBaseReg,
        F.Scale, LU.AccessTy.AddrSpace);

    assert(ScaleCostMinOffset.isValid() && ScaleCostMaxOffset.isValid() &&
           "Legal addressing mode has an illegal cost!");
    return std::max(ScaleCostMinOffset, ScaleCostMaxOffset);
  }
  case LSRUse::ICmpZero:
  case LSRUse::Basic:
  case LSRUse::Special:
    // The use is completely folded, i.e., everything is folded into the
    // instruction.
    return 0;
  }

  llvm_unreachable("Invalid LSRUse Kind!");
}
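// For example (illustrative), a target may accept [reg + 4*reg] addressing
// but execute it more slowly than plain [reg]; getScalingFactorCost surfaces
// that penalty, and we conservatively keep the worse of the two costs over
// the use's offset range.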
static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
                             LSRUse::KindType Kind, MemAccessTy AccessTy,
                             GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg) {
  // Fast-path: zero is always foldable.
  if (BaseOffset == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate, a base, and a scale.
  int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  // Canonicalize a scale of 1 to a base register if the formula doesn't
  // already have a base register.
  if (!HasBaseReg && Scale == 1) {
    Scale = 0;
    HasBaseReg = true;
  }

  return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, BaseOffset,
                              HasBaseReg, Scale);
}

static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
                             ScalarEvolution &SE, int64_t MinOffset,
                             int64_t MaxOffset, LSRUse::KindType Kind,
                             MemAccessTy AccessTy, const SCEV *S,
                             bool HasBaseReg) {
  // Fast-path: zero is always foldable.
  if (S->isZero()) return true;

  // Conservatively, create an address with an immediate, a base, and a scale.
  int64_t BaseOffset = ExtractImmediate(S, SE);
  GlobalValue *BaseGV = ExtractSymbol(S, SE);

  // If there's anything else involved, it's not foldable.
  if (!S->isZero()) return false;

  // Fast-path: zero is always foldable.
  if (BaseOffset == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate, a base, and a scale.
  int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV,
                              BaseOffset, HasBaseReg, Scale);
}

namespace {

/// An individual increment in a Chain of IV increments. Relate an IV user to
/// an expression that computes the IV it uses from the IV used by the previous
/// link in the Chain.
///
/// For the head of a chain, IncExpr holds the absolute SCEV expression for the
/// original IVOperand. The head of the chain's IVOperand is only valid during
/// chain collection, before LSR replaces IV users. During chain generation,
/// IncExpr can be used to find the new IVOperand that computes the same
/// expression.
struct IVInc {
  Instruction *UserInst;
  Value *IVOperand;
  const SCEV *IncExpr;

  IVInc(Instruction *U, Value *O, const SCEV *E)
      : UserInst(U), IVOperand(O), IncExpr(E) {}
};

// The list of IV increments in program order. We typically add the head of a
// chain without finding subsequent links.
struct IVChain {
  SmallVector<IVInc, 1> Incs;
  const SCEV *ExprBase = nullptr;

  IVChain() = default;
  IVChain(const IVInc &Head, const SCEV *Base)
      : Incs(1, Head), ExprBase(Base) {}

  using const_iterator = SmallVectorImpl<IVInc>::const_iterator;

  // Return the first increment in the chain, skipping the chain head.
  const_iterator begin() const {
    assert(!Incs.empty());
    return std::next(Incs.begin());
  }
  const_iterator end() const {
    return Incs.end();
  }

  // Returns true if this chain contains any increments.
  bool hasIncs() const { return Incs.size() >= 2; }

  // Add an IVInc to the end of this chain.
  void add(const IVInc &X) { Incs.push_back(X); }

  // Returns the last UserInst in the chain.
  Instruction *tailUserInst() const { return Incs.back().UserInst; }

  // Returns true if IncExpr can be profitably added to this chain.
  bool isProfitableIncrement(const SCEV *OperExpr,
                             const SCEV *IncExpr,
                             ScalarEvolution&);
};
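// For example (illustrative), in a loop that accesses both p[i] and p[i+1]
// through the same induction variable, the two address computations can form
// an IVChain whose second link's IncExpr is the constant byte offset between
// them, so only one IV value needs to be kept live across the two uses.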
/// Helper for CollectChains to track multiple IV increment uses. Distinguish
/// between FarUsers that definitely cross IV increments and NearUsers that may
/// be used between IV increments.
struct ChainUsers {
  SmallPtrSet<Instruction*, 4> FarUsers;
  SmallPtrSet<Instruction*, 4> NearUsers;
};

/// This class holds state for the main loop strength reduction logic.
class LSRInstance {
  IVUsers &IU;
  ScalarEvolution &SE;
  DominatorTree &DT;
  LoopInfo &LI;
  AssumptionCache &AC;
  TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  Loop *const L;
  MemorySSAUpdater *MSSAU;
  TTI::AddressingModeKind AMK;
  bool Changed = false;

  /// This is the insert position at which the current loop's induction
  /// variable increment should be placed. In simple loops, this is the latch
  /// block's terminator. But in more complicated cases, this is a position
  /// which will dominate all the in-loop post-increment users.
  Instruction *IVIncInsertPos = nullptr;

  /// Interesting factors between use strides.
  ///
  /// We explicitly use a SetVector which contains a SmallSet, instead of the
  /// default, a SmallDenseSet, because we need to use the full range of
  /// int64_ts, and there's currently no good way of doing that with
  /// SmallDenseSet.
  SetVector<int64_t, SmallVector<int64_t, 8>, SmallSet<int64_t, 8>> Factors;

  /// Interesting use types, to facilitate truncation reuse.
  SmallSetVector<Type *, 4> Types;

  /// The list of interesting uses.
  mutable SmallVector<LSRUse, 16> Uses;

  /// Track which uses use which register candidates.
  RegUseTracker RegUses;

  // Limit the number of chains to avoid quadratic behavior. We don't expect to
  // have more than a few IV increment chains in a loop. Missing a Chain falls
  // back to normal LSR behavior for those uses.
  static const unsigned MaxChains = 8;

  /// IV users can form a chain of IV increments.
  SmallVector<IVChain, MaxChains> IVChainVec;

  /// IV users that belong to profitable IVChains.
  SmallPtrSet<Use*, MaxChains> IVIncSet;

  void OptimizeShadowIV();
  bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
  ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
  void OptimizeLoopTermCond();

  void ChainInstruction(Instruction *UserInst, Instruction *IVOper,
                        SmallVectorImpl<ChainUsers> &ChainUsersVec);
  void FinalizeChain(IVChain &Chain);
  void CollectChains();
  void GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
                       SmallVectorImpl<WeakTrackingVH> &DeadInsts);

  void CollectInterestingTypesAndFactors();
  void CollectFixupsAndInitialFormulae();

  // Support for sharing of LSRUses between LSRFixups.
  using UseMapTy = DenseMap<LSRUse::SCEVUseKindPair, size_t>;
  UseMapTy UseMap;

  bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
                          LSRUse::KindType Kind, MemAccessTy AccessTy);

  std::pair<size_t, int64_t> getUse(const SCEV *&Expr, LSRUse::KindType Kind,
                                    MemAccessTy AccessTy);

  void DeleteUse(LSRUse &LU, size_t LUIdx);

  LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);

  void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void CountRegisters(const Formula &F, size_t LUIdx);
  bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);

  void CollectLoopInvariantFixupsAndFormulae();

  void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
                              unsigned Depth = 0);

  void GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
                                  const Formula &Base, unsigned Depth,
                                  size_t Idx, bool IsScaledReg = false);
  void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
                                   const Formula &Base, size_t Idx,
                                   bool IsScaledReg = false);
  void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateConstantOffsetsImpl(LSRUse &LU, unsigned LUIdx,
                                   const Formula &Base,
                                   const SmallVectorImpl<int64_t> &Worklist,
                                   size_t Idx, bool IsScaledReg = false);
  void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateCrossUseConstantOffsets();
  void GenerateAllReuseFormulae();

  void FilterOutUndesirableDedicatedRegisters();

  size_t EstimateSearchSpaceComplexity() const;
  void NarrowSearchSpaceByDetectingSupersets();
  void NarrowSearchSpaceByCollapsingUnrolledCode();
  void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  void NarrowSearchSpaceByFilterFormulaWithSameScaledReg();
  void NarrowSearchSpaceByFilterPostInc();
  void NarrowSearchSpaceByDeletingCostlyFormulas();
  void NarrowSearchSpaceByPickingWinnerRegs();
  void NarrowSearchSpaceUsingHeuristics();

  void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                    Cost &SolutionCost,
                    SmallVectorImpl<const Formula *> &Workspace,
                    const Cost &CurCost,
                    const SmallPtrSet<const SCEV *, 16> &CurRegs,
                    DenseSet<const SCEV *> &VisitedRegs) const;
  void Solve(SmallVectorImpl<const Formula *> &Solution) const;

  BasicBlock::iterator
  HoistInsertPosition(BasicBlock::iterator IP,
                      const SmallVectorImpl<Instruction *> &Inputs) const;
  BasicBlock::iterator
  AdjustInsertPositionForExpand(BasicBlock::iterator IP,
                                const LSRFixup &LF,
                                const LSRUse &LU,
                                SCEVExpander &Rewriter) const;

  Value *Expand(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
                BasicBlock::iterator IP, SCEVExpander &Rewriter,
                SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
  void RewriteForPHI(PHINode *PN, const LSRUse &LU, const LSRFixup &LF,
                     const Formula &F, SCEVExpander &Rewriter,
                     SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
  void Rewrite(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
               SCEVExpander &Rewriter,
               SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
  void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution);

public:
  LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE, DominatorTree &DT,
              LoopInfo &LI, const TargetTransformInfo &TTI, AssumptionCache &AC,
              TargetLibraryInfo &TLI, MemorySSAUpdater *MSSAU);

  bool getChanged() const { return Changed; }

  void print_factors_and_types(raw_ostream &OS) const;
  void print_fixups(raw_ostream &OS) const;
  void print_uses(raw_ostream &OS) const;
  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace

/// If the IV is used in an int-to-float cast inside the loop, try to eliminate
/// the cast operation.
void LSRInstance::OptimizeShadowIV() {
  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;

  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
       UI != E; /* empty */) {
    IVUsers::const_iterator CandidateUI = UI;
    ++UI;
    Instruction *ShadowUse = CandidateUI->getUser();
    Type *DestTy = nullptr;
    bool IsSigned = false;

    /* If the shadow use is an int->float cast, then insert a second IV
       to eliminate this cast.

         for (unsigned i = 0; i < n; ++i)
           foo((double)i);

       is transformed into

         double d = 0.0;
         for (unsigned i = 0; i < n; ++i, ++d)
           foo(d);
    */
    if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser())) {
      IsSigned = false;
      DestTy = UCast->getDestTy();
    }
    else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser())) {
      IsSigned = true;
      DestTy = SCast->getDestTy();
    }
    if (!DestTy) continue;

    // If the target does not support DestTy natively, then do not apply
    // this transformation.
    if (!TTI.isTypeLegal(DestTy)) continue;

    PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
    if (!PH) continue;
    if (PH->getNumIncomingValues() != 2) continue;

    // If the calculation in integers overflows, the result in the FP type
    // will differ. So we can only do this transformation if we are guaranteed
    // not to deal with overflowing values.
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PH));
    if (!AR) continue;
    if (IsSigned && !AR->hasNoSignedWrap()) continue;
    if (!IsSigned && !AR->hasNoUnsignedWrap()) continue;

    Type *SrcTy = PH->getType();
    int Mantissa = DestTy->getFPMantissaWidth();
    if (Mantissa == -1) continue;
    if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
      continue;

    unsigned Entry, Latch;
    if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
      Entry = 0;
      Latch = 1;
    } else {
      Entry = 1;
      Latch = 0;
    }

    ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
    if (!Init) continue;
    Constant *NewInit = ConstantFP::get(DestTy, IsSigned ?
                                        (double)Init->getSExtValue() :
                                        (double)Init->getZExtValue());

    BinaryOperator *Incr =
        dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
    if (!Incr) continue;
    if (Incr->getOpcode() != Instruction::Add
        && Incr->getOpcode() != Instruction::Sub)
      continue;

    /* Initialize new IV, double d = 0.0 in above example. */
    ConstantInt *C = nullptr;
    if (Incr->getOperand(0) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(1));
    else if (Incr->getOperand(1) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(0));
    else
      continue;

    if (!C) continue;

    // Ignore negative constants, as the code below doesn't handle them
    // correctly. TODO: Remove this restriction.
    if (!C->getValue().isStrictlyPositive()) continue;

    /* Add new PHINode. */
    PHINode *NewPH = PHINode::Create(DestTy, 2, "IV.S.", PH);

    /* Create new increment. '++d' in above example. */
    Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
    BinaryOperator *NewIncr =
        BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
                                   Instruction::FAdd : Instruction::FSub,
                               NewPH, CFP, "IV.S.next.", Incr);

    NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
    NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));

    /* Remove cast operation. */
    ShadowUse->replaceAllUsesWith(NewPH);
    ShadowUse->eraseFromParent();
    Changed = true;
    break;
  }
}

/// If Cond has an operand that is an expression of an IV, set the IV user and
/// stride information and return true, otherwise return false.
bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) {
  for (IVStrideUse &U : IU)
    if (U.getUser() == Cond) {
      // NOTE: we could handle setcc instructions with multiple uses here, but
      // InstCombine does it as well for simple uses; it's not clear that it
      // occurs often enough in real life to be worth handling.
      CondUse = &U;
      return true;
    }
  return false;
}

/// Rewrite the loop's terminating condition if it uses a max computation.
///
/// This is a narrow solution to a specific, but acute, problem. For loops
/// like this:
///
///   i = 0;
///   do {
///     p[i] = 0.0;
///   } while (++i < n);
///
/// the trip count isn't just 'n', because 'n' might not be positive. And
/// unfortunately this can come up even for loops where the user didn't use
/// a C do-while loop. For example, seemingly well-behaved top-test loops
/// will commonly be lowered like this:
///
///   if (n > 0) {
///     i = 0;
///     do {
///       p[i] = 0.0;
///     } while (++i < n);
///   }
///
/// and then it's possible for subsequent optimization to obscure the if
/// test in such a way that indvars can't find it.
///
/// When indvars can't find the if test in loops like this, it creates a
/// max expression, which allows it to give the loop a canonical
/// induction variable:
///
///   i = 0;
///   max = n < 1 ? 1 : n;
///   do {
///     p[i] = 0.0;
///   } while (++i != max);
///
/// Canonical induction variables are necessary because the loop passes
/// are designed around them. The most obvious example of this is the
/// LoopInfo analysis, which doesn't remember trip count values. It
/// expects to be able to rediscover the trip count each time it is
/// needed, and it does this using a simple analysis that only succeeds if
/// the loop has a canonical induction variable.
///
/// However, when it comes time to generate code, the maximum operation
/// can be quite costly, especially if it's inside of an outer loop.
///
/// This function solves this problem by detecting such loops and rewriting
/// their conditions from ICMP_NE back to ICMP_SLT, and by deleting the
/// instructions for the maximum computation.
ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
  // Check that the loop matches the pattern we're looking for.
  if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
      Cond->getPredicate() != CmpInst::ICMP_NE)
    return Cond;

  SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
  if (!Sel || !Sel->hasOneUse()) return Cond;

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return Cond;
  const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1);

  // Add one to the backedge-taken count to get the trip count.
  const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount);
  if (IterationCount != SE.getSCEV(Sel)) return Cond;

  // Check for a max calculation that matches the pattern. There's no check
  // for ICMP_ULE here because the comparison would be with zero, which
  // isn't interesting.
  CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
  const SCEVNAryExpr *Max = nullptr;
  if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) {
    Pred = ICmpInst::ICMP_SLE;
    Max = S;
  } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) {
    Pred = ICmpInst::ICMP_SLT;
    Max = S;
  } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) {
    Pred = ICmpInst::ICMP_ULT;
    Max = U;
  } else {
    // No match; bail.
    return Cond;
  }

  // To handle a max with more than two operands, this optimization would
  // require additional checking and setup.
  if (Max->getNumOperands() != 2)
    return Cond;

  const SCEV *MaxLHS = Max->getOperand(0);
  const SCEV *MaxRHS = Max->getOperand(1);

  // ScalarEvolution canonicalizes constants to the left. For < and >, look
  // for a comparison with 1. For <= and >=, a comparison with zero.
  if (!MaxLHS ||
      (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One)))
    return Cond;

  // Check the relevant induction variable for conformance to
  // the pattern.
  const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
  if (!AR || !AR->isAffine() ||
      AR->getStart() != One ||
      AR->getStepRecurrence(SE) != One)
    return Cond;

  assert(AR->getLoop() == L &&
         "Loop condition operand is an addrec in a different loop!");

  // Check the right operand of the select, and remember it, as it will
  // be used in the new comparison instruction.
  Value *NewRHS = nullptr;
  if (ICmpInst::isTrueWhenEqual(Pred)) {
    // Look for n+1, and grab n.
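    // For example (illustrative), if the trip count select computes
    // (n > 0 ? n + 1 : 1), i.e. smax(n, 0) + 1 folded into the select's
    // arms, then one arm is an add of the form n + 1; stripping the "+ 1"
    // below recovers n, which becomes the RHS of the new comparison.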
    if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1)))
      if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1)))
        if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS)
          NewRHS = BO->getOperand(0);
    if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2)))
      if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1)))
        if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS)
          NewRHS = BO->getOperand(0);
    if (!NewRHS)
      return Cond;
  } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
    NewRHS = Sel->getOperand(1);
  else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
    NewRHS = Sel->getOperand(2);
  else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS))
    NewRHS = SU->getValue();
  else
    // Max doesn't match expected pattern.
    return Cond;

  // Determine the new comparison opcode. It may be signed or unsigned,
  // and the original comparison may be either equality or inequality.
  if (Cond->getPredicate() == CmpInst::ICMP_EQ)
    Pred = CmpInst::getInversePredicate(Pred);

  // Everything looks OK to change the condition into an SLT or SGE and
  // delete the max calculation.
  ICmpInst *NewCond =
      new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");

  NewCond->setDebugLoc(Cond->getDebugLoc());
  Cond->replaceAllUsesWith(NewCond);
  CondUse->setUser(NewCond);

  // Delete the max calculation instructions.
  Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
  Cond->eraseFromParent();
  Sel->eraseFromParent();
  if (Cmp->use_empty())
    Cmp->eraseFromParent();
  return NewCond;
}

/// Change the loop's terminating condition to use the postinc iv when
/// possible.
void
LSRInstance::OptimizeLoopTermCond() {
  SmallPtrSet<Instruction *, 4> PostIncs;

  // We need a different set of heuristics for rotated and non-rotated loops.
  // If a loop is rotated then the latch is also the backedge, so inserting
  // post-inc expressions just before the latch is ideal. To reduce live ranges
  // it also makes sense to rewrite terminating conditions to use post-inc
  // expressions.
  //
  // If the loop is not rotated then the latch is not a backedge; the latch
  // check is done in the loop head. Adding post-inc expressions before the
  // latch will cause overlapping live-ranges of pre-inc and post-inc
  // expressions in the loop body. In this case we do *not* want to use
  // post-inc expressions in the latch check, and we want to insert post-inc
  // expressions before the backedge.
  BasicBlock *LatchBlock = L->getLoopLatch();
  SmallVector<BasicBlock*, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  if (llvm::all_of(ExitingBlocks, [&LatchBlock](const BasicBlock *BB) {
        return LatchBlock != BB;
      })) {
    // The backedge doesn't exit the loop; treat this as a head-tested loop.
    IVIncInsertPos = LatchBlock->getTerminator();
    return;
  }

  // Otherwise treat this as a rotated loop.
  for (BasicBlock *ExitingBlock : ExitingBlocks) {
    // Get the terminating condition for the loop if possible. If we
    // can, we want to change it to use a post-incremented version of its
    // induction variable, to allow coalescing the live ranges for the IV into
    // one register value.
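    // For example (illustrative), rewriting a latch test
    //   %cmp = icmp slt i64 %i, %n
    // to compare the post-incremented value %i.next against a correspondingly
    // adjusted bound ends %i's live range at the increment, so %i and %i.next
    // can share one register across the backedge.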
    BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
    if (!TermBr)
      continue;
    // FIXME: Overly conservative; the termination condition could be an 'or'
    // etc.
    if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
      continue;

    // Search IVUsesByStride to find Cond's IVUse if there is one.
    IVStrideUse *CondUse = nullptr;
    ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
    if (!FindIVUserForCond(Cond, CondUse))
      continue;

    // If the trip count is computed in terms of a max (due to ScalarEvolution
    // being unable to find a sufficient guard, for example), change the loop
    // comparison to use SLT or ULT instead of NE.
    // One consequence of doing this now is that it disrupts the count-down
    // optimization. That's not always a bad thing though, because in such
    // cases it may still be worthwhile to avoid a max.
    Cond = OptimizeMax(Cond, CondUse);

    // If this exiting block dominates the latch block, it may also use
    // the post-inc value if it won't be shared with other uses.
    // Check for dominance.
    if (!DT.dominates(ExitingBlock, LatchBlock))
      continue;

    // Conservatively avoid trying to use the post-inc value in non-latch
    // exits if there may be pre-inc users in intervening blocks.
    if (LatchBlock != ExitingBlock)
      for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
        // Test if the use is reachable from the exiting block. This dominator
        // query is a conservative approximation of reachability.
        if (&*UI != CondUse &&
            !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) {
          // Conservatively assume there may be reuse if the quotient of their
          // strides could be a legal scale.
          const SCEV *A = IU.getStride(*CondUse, L);
          const SCEV *B = IU.getStride(*UI, L);
          if (!A || !B) continue;
          if (SE.getTypeSizeInBits(A->getType()) !=
              SE.getTypeSizeInBits(B->getType())) {
            if (SE.getTypeSizeInBits(A->getType()) >
                SE.getTypeSizeInBits(B->getType()))
              B = SE.getSignExtendExpr(B, A->getType());
            else
              A = SE.getSignExtendExpr(A, B->getType());
          }
          if (const SCEVConstant *D =
                  dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) {
            const ConstantInt *C = D->getValue();
            // Stride of one or negative one can have reuse with non-addresses.
            if (C->isOne() || C->isMinusOne())
              goto decline_post_inc;
            // Avoid weird situations.
            if (C->getValue().getMinSignedBits() >= 64 ||
                C->getValue().isMinSignedValue())
              goto decline_post_inc;
            // Check for possible scaled-address reuse.
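            // For example (illustrative), if CondUse strides by 1 and the
            // other use strides by 4, the quotient 4 may be a legal address
            // scale on the target (such as [reg + 4*reg] on x86); keeping the
            // pre-inc value preserves that reuse, so we decline the post-inc
            // transformation.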
            if (isAddressUse(TTI, UI->getUser(), UI->getOperandValToReplace())) {
              MemAccessTy AccessTy = getAccessType(
                  TTI, UI->getUser(), UI->getOperandValToReplace());
              int64_t Scale = C->getSExtValue();
              if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr,
                                            /*BaseOffset=*/0,
                                            /*HasBaseReg=*/false, Scale,
                                            AccessTy.AddrSpace))
                goto decline_post_inc;
              Scale = -Scale;
              if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr,
                                            /*BaseOffset=*/0,
                                            /*HasBaseReg=*/false, Scale,
                                            AccessTy.AddrSpace))
                goto decline_post_inc;
            }
          }
        }

    LLVM_DEBUG(dbgs() << "  Change loop exiting icmp to use postinc iv: "
                      << *Cond << '\n');

    // It's possible for the setcc instruction to be anywhere in the loop, and
    // possible for it to have multiple users. If it is not immediately before
    // the exiting block branch, move it.
    if (Cond->getNextNonDebugInstruction() != TermBr) {
      if (Cond->hasOneUse()) {
        Cond->moveBefore(TermBr);
      } else {
        // Clone the terminating condition and insert into the loopend.
        ICmpInst *OldCond = Cond;
        Cond = cast<ICmpInst>(Cond->clone());
        Cond->setName(L->getHeader()->getName() + ".termcond");
        ExitingBlock->getInstList().insert(TermBr->getIterator(), Cond);

        // Clone the IVUse, as the old use still exists!
        CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace());
        TermBr->replaceUsesOfWith(OldCond, Cond);
      }
    }

    // If we get to here, we know that we can transform the setcc instruction
    // to use the post-incremented version of the IV, allowing us to coalesce
    // the live ranges for the IV correctly.
    CondUse->transformToPostInc(L);
    Changed = true;

    PostIncs.insert(Cond);
  decline_post_inc:;
  }

  // Determine an insertion point for the loop induction variable increment. It
  // must dominate all the post-inc comparisons we just set up, and it must
  // dominate the loop latch edge.
  IVIncInsertPos = L->getLoopLatch()->getTerminator();
  for (Instruction *Inst : PostIncs) {
    BasicBlock *BB =
        DT.findNearestCommonDominator(IVIncInsertPos->getParent(),
                                      Inst->getParent());
    if (BB == Inst->getParent())
      IVIncInsertPos = Inst;
    else if (BB != IVIncInsertPos->getParent())
      IVIncInsertPos = BB->getTerminator();
  }
}

/// Determine if the given use can accommodate a fixup at the given offset and
/// other details. If so, update the use and return true.
bool LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
                                     bool HasBaseReg, LSRUse::KindType Kind,
                                     MemAccessTy AccessTy) {
  int64_t NewMinOffset = LU.MinOffset;
  int64_t NewMaxOffset = LU.MaxOffset;
  MemAccessTy NewAccessTy = AccessTy;

  // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
  // something conservative; however, this can pessimize in the case that one
  // of the uses will have all its uses outside the loop, for example.
  if (LU.Kind != Kind)
    return false;

  // Check for a mismatched access type, and fall back conservatively as
  // needed.
  // TODO: Be less conservative when the type is similar and can use the same
  // addressing modes.
  if (Kind == LSRUse::Address) {
    if (AccessTy.MemTy != LU.AccessTy.MemTy) {
      NewAccessTy = MemAccessTy::getUnknown(AccessTy.MemTy->getContext(),
                                            AccessTy.AddrSpace);
    }
  }

  // Conservatively assume HasBaseReg is true for now.
  if (NewOffset < LU.MinOffset) {
    if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
                          LU.MaxOffset - NewOffset, HasBaseReg))
      return false;
    NewMinOffset = NewOffset;
  } else if (NewOffset > LU.MaxOffset) {
    if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
                          NewOffset - LU.MinOffset, HasBaseReg))
      return false;
    NewMaxOffset = NewOffset;
  }

  // Update the use.
  LU.MinOffset = NewMinOffset;
  LU.MaxOffset = NewMaxOffset;
  LU.AccessTy = NewAccessTy;
  return true;
}

/// Return an LSRUse index and an offset value for a fixup which needs the
/// given expression, with the given kind and optional access type. Either
/// reuse an existing use or create a new one, as needed.
std::pair<size_t, int64_t> LSRInstance::getUse(const SCEV *&Expr,
                                               LSRUse::KindType Kind,
                                               MemAccessTy AccessTy) {
  const SCEV *Copy = Expr;
  int64_t Offset = ExtractImmediate(Expr, SE);

  // Basic uses can't accept any offset, for example.
  if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr,
                        Offset, /*HasBaseReg=*/ true)) {
    Expr = Copy;
    Offset = 0;
  }

  std::pair<UseMapTy::iterator, bool> P =
      UseMap.insert(std::make_pair(LSRUse::SCEVUseKindPair(Expr, Kind), 0));
  if (!P.second) {
    // A use already existed with this base.
    size_t LUIdx = P.first->second;
    LSRUse &LU = Uses[LUIdx];
    if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy))
      // Reuse this use.
      return std::make_pair(LUIdx, Offset);
  }

  // Create a new use.
  size_t LUIdx = Uses.size();
  P.first->second = LUIdx;
  Uses.push_back(LSRUse(Kind, AccessTy));
  LSRUse &LU = Uses[LUIdx];

  LU.MinOffset = Offset;
  LU.MaxOffset = Offset;
  return std::make_pair(LUIdx, Offset);
}

/// Delete the given use from the Uses list.
void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) {
  if (&LU != &Uses.back())
    std::swap(LU, Uses.back());
  Uses.pop_back();

  // Update RegUses.
  RegUses.swapAndDropUse(LUIdx, Uses.size());
}

/// Look for a use distinct from OrigLU which has a formula that has the same
/// registers as the given formula.
LSRUse *
LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
                                       const LSRUse &OrigLU) {
  // Search all uses for the formula. This could be more clever.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    // Check whether this use is close enough to OrigLU, to see whether it's
    // worthwhile looking through its formulae.
    // Ignore ICmpZero uses because they may contain formulae generated by
    // GenerateICmpZeroScales, in which case adding fixup offsets may
    // be invalid.
    if (&LU != &OrigLU &&
        LU.Kind != LSRUse::ICmpZero &&
        LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy &&
        LU.WidestFixupType == OrigLU.WidestFixupType &&
        LU.HasFormulaWithSameRegs(OrigF)) {
      // Scan through this use's formulae.
      for (const Formula &F : LU.Formulae) {
        // Check to see if this formula has the same registers and symbols
        // as OrigF.
        if (F.BaseRegs == OrigF.BaseRegs &&
            F.ScaledReg == OrigF.ScaledReg &&
            F.BaseGV == OrigF.BaseGV &&
            F.Scale == OrigF.Scale &&
            F.UnfoldedOffset == OrigF.UnfoldedOffset) {
          if (F.BaseOffset == 0)
            return &LU;
          // This is the formula where all the registers and symbols matched;
          // there aren't going to be any others. Since we declined it, we
          // can skip the rest of the formulae and proceed to the next LSRUse.
          break;
        }
      }
    }
  }

  // Nothing looked good.
  return nullptr;
}

void LSRInstance::CollectInterestingTypesAndFactors() {
  SmallSetVector<const SCEV *, 4> Strides;

  // Collect interesting types and strides.
  SmallVector<const SCEV *, 4> Worklist;
  for (const IVStrideUse &U : IU) {
    const SCEV *Expr = IU.getExpr(U);

    // Collect interesting types.
    Types.insert(SE.getEffectiveSCEVType(Expr->getType()));

    // Add strides for mentioned loops.
    Worklist.push_back(Expr);
    do {
      const SCEV *S = Worklist.pop_back_val();
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
        if (AR->getLoop() == L)
          Strides.insert(AR->getStepRecurrence(SE));
        Worklist.push_back(AR->getStart());
      } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
        Worklist.append(Add->op_begin(), Add->op_end());
      }
    } while (!Worklist.empty());
  }

  // Compute interesting factors from the set of interesting strides.
  for (SmallSetVector<const SCEV *, 4>::const_iterator
       I = Strides.begin(), E = Strides.end(); I != E; ++I)
    for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
         std::next(I); NewStrideIter != E; ++NewStrideIter) {
      const SCEV *OldStride = *I;
      const SCEV *NewStride = *NewStrideIter;

      if (SE.getTypeSizeInBits(OldStride->getType()) !=
          SE.getTypeSizeInBits(NewStride->getType())) {
        if (SE.getTypeSizeInBits(OldStride->getType()) >
            SE.getTypeSizeInBits(NewStride->getType()))
          NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType());
        else
          OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
      }
      if (const SCEVConstant *Factor =
              dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride,
                                                          SE, true))) {
        if (Factor->getAPInt().getMinSignedBits() <= 64 && !Factor->isZero())
          Factors.insert(Factor->getAPInt().getSExtValue());
      } else if (const SCEVConstant *Factor =
                     dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride,
                                                                 NewStride,
                                                                 SE, true))) {
        if (Factor->getAPInt().getMinSignedBits() <= 64 && !Factor->isZero())
          Factors.insert(Factor->getAPInt().getSExtValue());
      }
    }

  // If all uses use the same type, don't bother looking for truncation-based
  // reuse.
  if (Types.size() == 1)
    Types.clear();

  LLVM_DEBUG(print_factors_and_types(dbgs()));
}
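// For example, if one interesting stride is 4 and another is 8, their exact
// quotient 2 is recorded in Factors, allowing formulae based on one stride to
// be rewritten in terms of the other.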
/// Helper for CollectChains that finds an IV operand (computed by an AddRec in
/// this loop) within [OI,OE) or returns OE. If IVUsers mapped Instructions to
/// IVStrideUses, we could partially skip this.
static User::op_iterator
findIVOperand(User::op_iterator OI, User::op_iterator OE,
              Loop *L, ScalarEvolution &SE) {
  for (; OI != OE; ++OI) {
    if (Instruction *Oper = dyn_cast<Instruction>(*OI)) {
      if (!SE.isSCEVable(Oper->getType()))
        continue;

      if (const SCEVAddRecExpr *AR =
              dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Oper))) {
        if (AR->getLoop() == L)
          break;
      }
    }
  }
  return OI;
}

/// IVChain logic must consistently peek base TruncInst operands, so wrap it in
/// a convenient helper.
static Value *getWideOperand(Value *Oper) {
  if (TruncInst *Trunc = dyn_cast<TruncInst>(Oper))
    return Trunc->getOperand(0);
  return Oper;
}

/// Return true if we allow an IV chain to include both types.
static bool isCompatibleIVType(Value *LVal, Value *RVal) {
  Type *LType = LVal->getType();
  Type *RType = RVal->getType();
  return (LType == RType) || (LType->isPointerTy() && RType->isPointerTy() &&
                              // Different address spaces means (possibly)
                              // different types of the pointer implementation,
                              // e.g. i16 vs i32 so disallow that.
                              (LType->getPointerAddressSpace() ==
                               RType->getPointerAddressSpace()));
}

/// Return an approximation of this SCEV expression's "base", or NULL for any
/// constant. Returning the expression itself is conservative. Returning a
/// deeper subexpression is more precise and valid as long as it isn't less
/// complex than another subexpression. For expressions involving multiple
/// unscaled values, we need to return the pointer-type SCEVUnknown. This
/// avoids forming chains across objects, such as: PrevOper==a[i], IVOper==b[i],
/// IVInc==b-a.
///
/// Since SCEVUnknown is the rightmost type, and pointers are the rightmost
/// SCEVUnknown, we simply return the rightmost SCEV operand.
static const SCEV *getExprBase(const SCEV *S) {
  switch (S->getSCEVType()) {
  default: // including scUnknown.
    return S;
  case scConstant:
    return nullptr;
  case scTruncate:
    return getExprBase(cast<SCEVTruncateExpr>(S)->getOperand());
  case scZeroExtend:
    return getExprBase(cast<SCEVZeroExtendExpr>(S)->getOperand());
  case scSignExtend:
    return getExprBase(cast<SCEVSignExtendExpr>(S)->getOperand());
  case scAddExpr: {
    // Skip over scaled operands (scMulExpr) to follow add operands as long as
    // there's nothing more complex.
    // FIXME: not sure if we want to recognize negation.
    const SCEVAddExpr *Add = cast<SCEVAddExpr>(S);
    for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(Add->op_end()),
         E(Add->op_begin()); I != E; ++I) {
      const SCEV *SubExpr = *I;
      if (SubExpr->getSCEVType() == scAddExpr)
        return getExprBase(SubExpr);

      if (SubExpr->getSCEVType() != scMulExpr)
        return SubExpr;
    }
    return S; // all operands are scaled, be conservative.
  }
  case scAddRecExpr:
    return getExprBase(cast<SCEVAddRecExpr>(S)->getStart());
  }
  llvm_unreachable("Unknown SCEV kind!");
}
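// For example (illustrative), getExprBase maps:
//   42                  --> null (constants have no base)
//   {%base,+,4}         --> %base (via the addrec's start)
//   ((4 * %i) + %base)  --> %base (the rightmost non-scaled add operand)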
/// Return true if the chain increment is profitable to expand into a loop
/// invariant value, which may require its own register. A profitable chain
/// increment will be an offset relative to the same base. We allow such
/// offsets to potentially be used as a chain increment as long as it's not
/// obviously expensive to expand using real instructions.
bool IVChain::isProfitableIncrement(const SCEV *OperExpr,
                                    const SCEV *IncExpr,
                                    ScalarEvolution &SE) {
  // Aggressively form chains when -stress-ivchain.
  if (StressIVChain)
    return true;

  // Do not replace a constant offset from IV head with a nonconstant IV
  // increment.
  if (!isa<SCEVConstant>(IncExpr)) {
    const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Incs[0].IVOperand));
    if (isa<SCEVConstant>(SE.getMinusSCEV(OperExpr, HeadExpr)))
      return false;
  }

  SmallPtrSet<const SCEV*, 8> Processed;
  return !isHighCostExpansion(IncExpr, Processed, SE);
}

/// Return true if the number of registers needed for the chain is estimated to
/// be less than the number required for the individual IV users. First
/// prohibit any IV users that keep the IV live across increments (the Users
/// set should be empty). Next count the number and type of increments in the
/// chain.
///
/// Chaining IVs can lead to considerable code bloat if ISEL doesn't
/// effectively use postinc addressing modes. Only consider it profitable if
/// the increments can be computed in fewer registers when chained.
///
/// TODO: Consider IVInc free if it's already used in another chain.
static bool isProfitableChain(IVChain &Chain,
                              SmallPtrSetImpl<Instruction *> &Users,
                              ScalarEvolution &SE,
                              const TargetTransformInfo &TTI) {
  if (StressIVChain)
    return true;

  if (!Chain.hasIncs())
    return false;

  if (!Users.empty()) {
    LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n";
               for (Instruction *Inst
                    : Users) { dbgs() << "  " << *Inst << "\n"; });
    return false;
  }
  assert(!Chain.Incs.empty() && "empty IV chains are not allowed");

  // The chain itself may require a register, so initialize cost to 1.
  int cost = 1;

  // A complete chain likely eliminates the need for keeping the original IV in
  // a register. LSR does not currently know how to form a complete chain
  // unless the header phi already exists.
  if (isa<PHINode>(Chain.tailUserInst())
      && SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) {
    --cost;
  }
  const SCEV *LastIncExpr = nullptr;
  unsigned NumConstIncrements = 0;
  unsigned NumVarIncrements = 0;
  unsigned NumReusedIncrements = 0;

  if (TTI.isProfitableLSRChainElement(Chain.Incs[0].UserInst))
    return true;

  for (const IVInc &Inc : Chain) {
    if (TTI.isProfitableLSRChainElement(Inc.UserInst))
      return true;
    if (Inc.IncExpr->isZero())
      continue;

    // Incrementing by zero or some constant is neutral. We assume constants
    // can be folded into an addressing mode or an add's immediate operand.
    if (isa<SCEVConstant>(Inc.IncExpr)) {
      ++NumConstIncrements;
      continue;
    }

    if (Inc.IncExpr == LastIncExpr)
      ++NumReusedIncrements;
    else
      ++NumVarIncrements;

    LastIncExpr = Inc.IncExpr;
  }
  // An IV chain with a single increment is handled by LSR's postinc
  // uses. However, a chain with multiple increments requires keeping the IV's
  // value live longer than it needs to be if chained.
  if (NumConstIncrements > 1)
    --cost;

  // Materializing increment expressions in the preheader that didn't exist in
  // the original code may cost a register.
  // For example, sign-extended array indices can produce ridiculous
  // increments like this:
  //   IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64)))
  cost += NumVarIncrements;

  // Reusing variable increments likely saves a register to hold the multiple
  // of the stride.
  cost -= NumReusedIncrements;

  LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost
                    << "\n");

  return cost < 0;
}

/// Add this IV user to an existing chain or make it the head of a new chain.
void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
                                   SmallVectorImpl<ChainUsers> &ChainUsersVec) {
  // When IVs are used as types of varying widths, they are generally converted
  // to a wider type with some uses remaining narrow under a (free) trunc.
  Value *const NextIV = getWideOperand(IVOper);
  const SCEV *const OperExpr = SE.getSCEV(NextIV);
  const SCEV *const OperExprBase = getExprBase(OperExpr);

  // Visit all existing chains. Check whether this user's IVOper can be
  // computed as a profitable loop invariant increment from the last link in
  // the Chain.
  unsigned ChainIdx = 0, NChains = IVChainVec.size();
  const SCEV *LastIncExpr = nullptr;
  for (; ChainIdx < NChains; ++ChainIdx) {
    IVChain &Chain = IVChainVec[ChainIdx];

    // Prune the solution space aggressively by checking that both IV operands
    // are expressions that operate on the same unscaled SCEVUnknown. This
    // "base" will be canceled by the subsequent getMinusSCEV call. Checking
    // first avoids creating extra SCEV expressions.
    if (!StressIVChain && Chain.ExprBase != OperExprBase)
      continue;

    Value *PrevIV = getWideOperand(Chain.Incs.back().IVOperand);
    if (!isCompatibleIVType(PrevIV, NextIV))
      continue;

    // A phi node terminates a chain.
    if (isa<PHINode>(UserInst) && isa<PHINode>(Chain.tailUserInst()))
      continue;

    // The increment must be loop-invariant so it can be kept in a register.
    const SCEV *PrevExpr = SE.getSCEV(PrevIV);
    const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr);
    if (isa<SCEVCouldNotCompute>(IncExpr) || !SE.isLoopInvariant(IncExpr, L))
      continue;

    if (Chain.isProfitableIncrement(OperExpr, IncExpr, SE)) {
      LastIncExpr = IncExpr;
      break;
    }
  }
  // If we haven't found a chain, create a new one, unless we hit the max.
  // Don't bother for phi nodes, because they must be last in the chain.
  if (ChainIdx == NChains) {
    if (isa<PHINode>(UserInst))
      return;
    if (NChains >= MaxChains && !StressIVChain) {
      LLVM_DEBUG(dbgs() << "IV Chain Limit\n");
      return;
    }
    LastIncExpr = OperExpr;
    // IVUsers may have skipped over sign/zero extensions. We don't currently
    // attempt to form chains involving extensions unless they can be hoisted
    // into this loop's AddRec.
    if (!isa<SCEVAddRecExpr>(LastIncExpr))
      return;
    ++NChains;
    IVChainVec.push_back(IVChain(IVInc(UserInst, IVOper, LastIncExpr),
                                 OperExprBase));
    ChainUsersVec.resize(NChains);
    LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst
                      << ") IV=" << *LastIncExpr << "\n");
  } else {
    LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Inc: (" << *UserInst
                      << ") IV+" << *LastIncExpr << "\n");
    // Add this IV user to the end of the chain.
    IVChainVec[ChainIdx].add(IVInc(UserInst, IVOper, LastIncExpr));
  }
  IVChain &Chain = IVChainVec[ChainIdx];

  SmallPtrSet<Instruction*,4> &NearUsers = ChainUsersVec[ChainIdx].NearUsers;
  // This chain's NearUsers become FarUsers.
  if (!LastIncExpr->isZero()) {
    ChainUsersVec[ChainIdx].FarUsers.insert(NearUsers.begin(),
                                            NearUsers.end());
    NearUsers.clear();
  }

  // All other uses of IVOperand become near uses of the chain.
  // We currently ignore intermediate values within SCEV expressions, assuming
  // they will eventually be used by the current chain, or can be computed
  // from one of the chain increments. To be more precise we could
  // transitively follow its users and only add leaf IV users to the set.
  for (User *U : IVOper->users()) {
    Instruction *OtherUse = dyn_cast<Instruction>(U);
    if (!OtherUse)
      continue;
    // Uses in the chain will no longer be uses if the chain is formed.
    // Include the head of the chain in this iteration (not Chain.begin()).
    IVChain::const_iterator IncIter = Chain.Incs.begin();
    IVChain::const_iterator IncEnd = Chain.Incs.end();
    for (; IncIter != IncEnd; ++IncIter) {
      if (IncIter->UserInst == OtherUse)
        break;
    }
    if (IncIter != IncEnd)
      continue;

    if (SE.isSCEVable(OtherUse->getType())
        && !isa<SCEVUnknown>(SE.getSCEV(OtherUse))
        && IU.isIVUserOrOperand(OtherUse)) {
      continue;
    }
    NearUsers.insert(OtherUse);
  }

  // Since this user is part of the chain, it's no longer considered a use
  // of the chain.
  ChainUsersVec[ChainIdx].FarUsers.erase(UserInst);
}

/// Populate the vector of Chains.
///
/// This decreases ILP at the architecture level. Targets with ample registers,
/// multiple memory ports, and no register renaming probably don't want
/// this. However, such targets should probably disable LSR altogether.
///
/// The job of LSR is to make a reasonable choice of induction variables across
/// the loop. Subsequent passes can easily "unchain" computation exposing more
/// ILP *within the loop* if the target wants it.
///
/// Finding the best IV chain is potentially a scheduling problem. Since LSR
/// will not reorder memory operations, it will recognize this as a chain, but
/// will generate redundant IV increments. Ideally this would be corrected
/// later by a smart scheduler:
///        = A[i]
///        = A[i+x]
/// A[i]   =
/// A[i+x] =
///
/// TODO: Walk the entire domtree within this loop, not just the path to the
/// loop latch. This will discover chains on side paths, but requires
/// maintaining multiple copies of the Chains state.
void LSRInstance::CollectChains() {
  LLVM_DEBUG(dbgs() << "Collecting IV Chains.\n");
  SmallVector<ChainUsers, 8> ChainUsersVec;

  SmallVector<BasicBlock *,8> LatchPath;
  BasicBlock *LoopHeader = L->getHeader();
  for (DomTreeNode *Rung = DT.getNode(L->getLoopLatch());
       Rung->getBlock() != LoopHeader; Rung = Rung->getIDom()) {
    LatchPath.push_back(Rung->getBlock());
  }
  LatchPath.push_back(LoopHeader);

  // Walk the instruction stream from the loop header to the loop latch.
  for (BasicBlock *BB : reverse(LatchPath)) {
    for (Instruction &I : *BB) {
      // Skip instructions that weren't seen by IVUsers analysis.
3082       if (isa<PHINode>(I) || !IU.isIVUserOrOperand(&I))
3083         continue;
3084 
3085       // Ignore users that are part of a SCEV expression. This way we only
3086       // consider leaf IV Users. This effectively rediscovers a portion of
3087       // IVUsers analysis but in program order this time.
3088       if (SE.isSCEVable(I.getType()) && !isa<SCEVUnknown>(SE.getSCEV(&I)))
3089         continue;
3090 
3091       // Remove this instruction from any NearUsers set it may be in.
3092       for (unsigned ChainIdx = 0, NChains = IVChainVec.size();
3093            ChainIdx < NChains; ++ChainIdx) {
3094         ChainUsersVec[ChainIdx].NearUsers.erase(&I);
3095       }
3096       // Search for operands that can be chained.
3097       SmallPtrSet<Instruction*, 4> UniqueOperands;
3098       User::op_iterator IVOpEnd = I.op_end();
3099       User::op_iterator IVOpIter = findIVOperand(I.op_begin(), IVOpEnd, L, SE);
3100       while (IVOpIter != IVOpEnd) {
3101         Instruction *IVOpInst = cast<Instruction>(*IVOpIter);
3102         if (UniqueOperands.insert(IVOpInst).second)
3103           ChainInstruction(&I, IVOpInst, ChainUsersVec);
3104         IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
3105       }
3106     } // Continue walking down the instructions.
3107   } // Continue walking down the domtree.
3108   // Visit phi backedges to determine if the chain can generate the IV postinc.
3109   for (PHINode &PN : L->getHeader()->phis()) {
3110     if (!SE.isSCEVable(PN.getType()))
3111       continue;
3112 
3113     Instruction *IncV =
3114         dyn_cast<Instruction>(PN.getIncomingValueForBlock(L->getLoopLatch()));
3115     if (IncV)
3116       ChainInstruction(&PN, IncV, ChainUsersVec);
3117   }
3118   // Remove any unprofitable chains.
3119   unsigned ChainIdx = 0;
3120   for (unsigned UsersIdx = 0, NChains = IVChainVec.size();
3121        UsersIdx < NChains; ++UsersIdx) {
3122     if (!isProfitableChain(IVChainVec[UsersIdx],
3123                            ChainUsersVec[UsersIdx].FarUsers, SE, TTI))
3124       continue;
3125     // Preserve the chain at UsersIdx.
3126     if (ChainIdx != UsersIdx)
3127       IVChainVec[ChainIdx] = IVChainVec[UsersIdx];
3128     FinalizeChain(IVChainVec[ChainIdx]);
3129     ++ChainIdx;
3130   }
3131   IVChainVec.resize(ChainIdx);
3132 }
3133 
3134 void LSRInstance::FinalizeChain(IVChain &Chain) {
3135   assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
3136   LLVM_DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n");
3137 
3138   for (const IVInc &Inc : Chain) {
3139     LLVM_DEBUG(dbgs() << "        Inc: " << *Inc.UserInst << "\n");
3140     auto UseI = find(Inc.UserInst->operands(), Inc.IVOperand);
3141     assert(UseI != Inc.UserInst->op_end() && "cannot find IV operand");
3142     IVIncSet.insert(UseI);
3143   }
3144 }
3145 
3146 /// Return true if the IVInc can be folded into an addressing mode.
3147 static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
3148                              Value *Operand, const TargetTransformInfo &TTI) {
3149   const SCEVConstant *IncConst = dyn_cast<SCEVConstant>(IncExpr);
3150   if (!IncConst || !isAddressUse(TTI, UserInst, Operand))
3151     return false;
3152 
3153   if (IncConst->getAPInt().getMinSignedBits() > 64)
3154     return false;
3155 
3156   MemAccessTy AccessTy = getAccessType(TTI, UserInst, Operand);
3157   int64_t IncOffset = IncConst->getValue()->getSExtValue();
3158   if (!isAlwaysFoldable(TTI, LSRUse::Address, AccessTy, /*BaseGV=*/nullptr,
3159                         IncOffset, /*HasBaseReg=*/false))
3160     return false;
3161 
3162   return true;
3163 }
3164 
3165 /// Generate an add or subtract for each IVInc in a chain to materialize the IV
3166 /// user's operand from the previous IV user's operand.
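///
/// For example (an illustrative sketch, not literal IR), given a chain of
/// loads with a constant stride of 4:
///
///        = *p          ; chain head, IVOperand = p
///        = *(p + 4)    ; IncExpr = 4
///        = *(p + 8)    ; IncExpr = 4
///
/// each IV operand is rewritten in terms of the previous one, so only one
/// register holds the IV at any point:
///
///   p1 = p;       ... = *p1
///   p2 = p1 + 4;  ... = *p2
///   p3 = p2 + 4;  ... = *p3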
3167 void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter, 3168 SmallVectorImpl<WeakTrackingVH> &DeadInsts) { 3169 // Find the new IVOperand for the head of the chain. It may have been replaced 3170 // by LSR. 3171 const IVInc &Head = Chain.Incs[0]; 3172 User::op_iterator IVOpEnd = Head.UserInst->op_end(); 3173 // findIVOperand returns IVOpEnd if it can no longer find a valid IV user. 3174 User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(), 3175 IVOpEnd, L, SE); 3176 Value *IVSrc = nullptr; 3177 while (IVOpIter != IVOpEnd) { 3178 IVSrc = getWideOperand(*IVOpIter); 3179 3180 // If this operand computes the expression that the chain needs, we may use 3181 // it. (Check this after setting IVSrc which is used below.) 3182 // 3183 // Note that if Head.IncExpr is wider than IVSrc, then this phi is too 3184 // narrow for the chain, so we can no longer use it. We do allow using a 3185 // wider phi, assuming the LSR checked for free truncation. In that case we 3186 // should already have a truncate on this operand such that 3187 // getSCEV(IVSrc) == IncExpr. 3188 if (SE.getSCEV(*IVOpIter) == Head.IncExpr 3189 || SE.getSCEV(IVSrc) == Head.IncExpr) { 3190 break; 3191 } 3192 IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE); 3193 } 3194 if (IVOpIter == IVOpEnd) { 3195 // Gracefully give up on this chain. 3196 LLVM_DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n"); 3197 return; 3198 } 3199 assert(IVSrc && "Failed to find IV chain source"); 3200 3201 LLVM_DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n"); 3202 Type *IVTy = IVSrc->getType(); 3203 Type *IntTy = SE.getEffectiveSCEVType(IVTy); 3204 const SCEV *LeftOverExpr = nullptr; 3205 for (const IVInc &Inc : Chain) { 3206 Instruction *InsertPt = Inc.UserInst; 3207 if (isa<PHINode>(InsertPt)) 3208 InsertPt = L->getLoopLatch()->getTerminator(); 3209 3210 // IVOper will replace the current IV User's operand. IVSrc is the IV 3211 // value currently held in a register. 3212 Value *IVOper = IVSrc; 3213 if (!Inc.IncExpr->isZero()) { 3214 // IncExpr was the result of subtraction of two narrow values, so must 3215 // be signed. 3216 const SCEV *IncExpr = SE.getNoopOrSignExtend(Inc.IncExpr, IntTy); 3217 LeftOverExpr = LeftOverExpr ? 3218 SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr; 3219 } 3220 if (LeftOverExpr && !LeftOverExpr->isZero()) { 3221 // Expand the IV increment. 3222 Rewriter.clearPostInc(); 3223 Value *IncV = Rewriter.expandCodeFor(LeftOverExpr, IntTy, InsertPt); 3224 const SCEV *IVOperExpr = SE.getAddExpr(SE.getUnknown(IVSrc), 3225 SE.getUnknown(IncV)); 3226 IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt); 3227 3228 // If an IV increment can't be folded, use it as the next IV value. 3229 if (!canFoldIVIncExpr(LeftOverExpr, Inc.UserInst, Inc.IVOperand, TTI)) { 3230 assert(IVTy == IVOper->getType() && "inconsistent IV increment type"); 3231 IVSrc = IVOper; 3232 LeftOverExpr = nullptr; 3233 } 3234 } 3235 Type *OperTy = Inc.IVOperand->getType(); 3236 if (IVTy != OperTy) { 3237 assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) && 3238 "cannot extend a chained IV"); 3239 IRBuilder<> Builder(InsertPt); 3240 IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain"); 3241 } 3242 Inc.UserInst->replaceUsesOfWith(Inc.IVOperand, IVOper); 3243 if (auto *OperandIsInstr = dyn_cast<Instruction>(Inc.IVOperand)) 3244 DeadInsts.emplace_back(OperandIsInstr); 3245 } 3246 // If LSR created a new, wider phi, we may also replace its postinc. 
We only 3247 // do this if we also found a wide value for the head of the chain. 3248 if (isa<PHINode>(Chain.tailUserInst())) { 3249 for (PHINode &Phi : L->getHeader()->phis()) { 3250 if (!isCompatibleIVType(&Phi, IVSrc)) 3251 continue; 3252 Instruction *PostIncV = dyn_cast<Instruction>( 3253 Phi.getIncomingValueForBlock(L->getLoopLatch())); 3254 if (!PostIncV || (SE.getSCEV(PostIncV) != SE.getSCEV(IVSrc))) 3255 continue; 3256 Value *IVOper = IVSrc; 3257 Type *PostIncTy = PostIncV->getType(); 3258 if (IVTy != PostIncTy) { 3259 assert(PostIncTy->isPointerTy() && "mixing int/ptr IV types"); 3260 IRBuilder<> Builder(L->getLoopLatch()->getTerminator()); 3261 Builder.SetCurrentDebugLocation(PostIncV->getDebugLoc()); 3262 IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain"); 3263 } 3264 Phi.replaceUsesOfWith(PostIncV, IVOper); 3265 DeadInsts.emplace_back(PostIncV); 3266 } 3267 } 3268 } 3269 3270 void LSRInstance::CollectFixupsAndInitialFormulae() { 3271 BranchInst *ExitBranch = nullptr; 3272 bool SaveCmp = TTI.canSaveCmp(L, &ExitBranch, &SE, &LI, &DT, &AC, &TLI); 3273 3274 for (const IVStrideUse &U : IU) { 3275 Instruction *UserInst = U.getUser(); 3276 // Skip IV users that are part of profitable IV Chains. 3277 User::op_iterator UseI = 3278 find(UserInst->operands(), U.getOperandValToReplace()); 3279 assert(UseI != UserInst->op_end() && "cannot find IV operand"); 3280 if (IVIncSet.count(UseI)) { 3281 LLVM_DEBUG(dbgs() << "Use is in profitable chain: " << **UseI << '\n'); 3282 continue; 3283 } 3284 3285 LSRUse::KindType Kind = LSRUse::Basic; 3286 MemAccessTy AccessTy; 3287 if (isAddressUse(TTI, UserInst, U.getOperandValToReplace())) { 3288 Kind = LSRUse::Address; 3289 AccessTy = getAccessType(TTI, UserInst, U.getOperandValToReplace()); 3290 } 3291 3292 const SCEV *S = IU.getExpr(U); 3293 PostIncLoopSet TmpPostIncLoops = U.getPostIncLoops(); 3294 3295 // Equality (== and !=) ICmps are special. We can rewrite (i == N) as 3296 // (N - i == 0), and this allows (N - i) to be the expression that we work 3297 // with rather than just N or i, so we can consider the register 3298 // requirements for both N and i at the same time. Limiting this code to 3299 // equality icmps is not a problem because all interesting loops use 3300 // equality icmps, thanks to IndVarSimplify. 3301 if (ICmpInst *CI = dyn_cast<ICmpInst>(UserInst)) { 3302 // If CI can be saved in some target, like replaced inside hardware loop 3303 // in PowerPC, no need to generate initial formulae for it. 3304 if (SaveCmp && CI == dyn_cast<ICmpInst>(ExitBranch->getCondition())) 3305 continue; 3306 if (CI->isEquality()) { 3307 // Swap the operands if needed to put the OperandValToReplace on the 3308 // left, for consistency. 3309 Value *NV = CI->getOperand(1); 3310 if (NV == U.getOperandValToReplace()) { 3311 CI->setOperand(1, CI->getOperand(0)); 3312 CI->setOperand(0, NV); 3313 NV = CI->getOperand(1); 3314 Changed = true; 3315 } 3316 3317 // x == y --> x - y == 0 3318 const SCEV *N = SE.getSCEV(NV); 3319 if (SE.isLoopInvariant(N, L) && isSafeToExpand(N, SE) && 3320 (!NV->getType()->isPointerTy() || 3321 SE.getPointerBase(N) == SE.getPointerBase(S))) { 3322 // S is normalized, so normalize N before folding it into S 3323 // to keep the result normalized. 3324 N = normalizeForPostIncUse(N, TmpPostIncLoops, SE); 3325 Kind = LSRUse::ICmpZero; 3326 S = SE.getMinusSCEV(N, S); 3327 } 3328 3329 // -1 and the negations of all interesting strides (except the negation 3330 // of -1) are now also interesting. 
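        // For example (illustrative), if 4 is an interesting stride factor,
        // inserting -4 here lets GenerateICmpZeroScales and GenerateScales
        // below also consider formulae scaled by -4.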
3331 for (size_t i = 0, e = Factors.size(); i != e; ++i) 3332 if (Factors[i] != -1) 3333 Factors.insert(-(uint64_t)Factors[i]); 3334 Factors.insert(-1); 3335 } 3336 } 3337 3338 // Get or create an LSRUse. 3339 std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy); 3340 size_t LUIdx = P.first; 3341 int64_t Offset = P.second; 3342 LSRUse &LU = Uses[LUIdx]; 3343 3344 // Record the fixup. 3345 LSRFixup &LF = LU.getNewFixup(); 3346 LF.UserInst = UserInst; 3347 LF.OperandValToReplace = U.getOperandValToReplace(); 3348 LF.PostIncLoops = TmpPostIncLoops; 3349 LF.Offset = Offset; 3350 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); 3351 3352 if (!LU.WidestFixupType || 3353 SE.getTypeSizeInBits(LU.WidestFixupType) < 3354 SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) 3355 LU.WidestFixupType = LF.OperandValToReplace->getType(); 3356 3357 // If this is the first use of this LSRUse, give it a formula. 3358 if (LU.Formulae.empty()) { 3359 InsertInitialFormula(S, LU, LUIdx); 3360 CountRegisters(LU.Formulae.back(), LUIdx); 3361 } 3362 } 3363 3364 LLVM_DEBUG(print_fixups(dbgs())); 3365 } 3366 3367 /// Insert a formula for the given expression into the given use, separating out 3368 /// loop-variant portions from loop-invariant and loop-computable portions. 3369 void 3370 LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) { 3371 // Mark uses whose expressions cannot be expanded. 3372 if (!isSafeToExpand(S, SE)) 3373 LU.RigidFormula = true; 3374 3375 Formula F; 3376 F.initialMatch(S, L, SE); 3377 bool Inserted = InsertFormula(LU, LUIdx, F); 3378 assert(Inserted && "Initial formula already exists!"); (void)Inserted; 3379 } 3380 3381 /// Insert a simple single-register formula for the given expression into the 3382 /// given use. 3383 void 3384 LSRInstance::InsertSupplementalFormula(const SCEV *S, 3385 LSRUse &LU, size_t LUIdx) { 3386 Formula F; 3387 F.BaseRegs.push_back(S); 3388 F.HasBaseReg = true; 3389 bool Inserted = InsertFormula(LU, LUIdx, F); 3390 assert(Inserted && "Supplemental formula already exists!"); (void)Inserted; 3391 } 3392 3393 /// Note which registers are used by the given formula, updating RegUses. 3394 void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) { 3395 if (F.ScaledReg) 3396 RegUses.countRegister(F.ScaledReg, LUIdx); 3397 for (const SCEV *BaseReg : F.BaseRegs) 3398 RegUses.countRegister(BaseReg, LUIdx); 3399 } 3400 3401 /// If the given formula has not yet been inserted, add it to the list, and 3402 /// return true. Return false otherwise. 3403 bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) { 3404 // Do not insert formula that we will not be able to expand. 3405 assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F) && 3406 "Formula is illegal"); 3407 3408 if (!LU.InsertFormula(F, *L)) 3409 return false; 3410 3411 CountRegisters(F, LUIdx); 3412 return true; 3413 } 3414 3415 /// Check for other uses of loop-invariant values which we're tracking. These 3416 /// other uses will pin these values in registers, making them less profitable 3417 /// for elimination. 3418 /// TODO: This currently misses non-constant addrec step registers. 3419 /// TODO: Should this give more weight to users inside the loop? 
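///
/// For example (illustrative), a loop-invariant bound %n used both by the
/// exit compare and by code after the loop is pinned in a register by those
/// other uses, so formulae that fold %n away save less than they appear to.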
3420 void 3421 LSRInstance::CollectLoopInvariantFixupsAndFormulae() { 3422 SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end()); 3423 SmallPtrSet<const SCEV *, 32> Visited; 3424 3425 while (!Worklist.empty()) { 3426 const SCEV *S = Worklist.pop_back_val(); 3427 3428 // Don't process the same SCEV twice 3429 if (!Visited.insert(S).second) 3430 continue; 3431 3432 if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) 3433 Worklist.append(N->op_begin(), N->op_end()); 3434 else if (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(S)) 3435 Worklist.push_back(C->getOperand()); 3436 else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) { 3437 Worklist.push_back(D->getLHS()); 3438 Worklist.push_back(D->getRHS()); 3439 } else if (const SCEVUnknown *US = dyn_cast<SCEVUnknown>(S)) { 3440 const Value *V = US->getValue(); 3441 if (const Instruction *Inst = dyn_cast<Instruction>(V)) { 3442 // Look for instructions defined outside the loop. 3443 if (L->contains(Inst)) continue; 3444 } else if (isa<UndefValue>(V)) 3445 // Undef doesn't have a live range, so it doesn't matter. 3446 continue; 3447 for (const Use &U : V->uses()) { 3448 const Instruction *UserInst = dyn_cast<Instruction>(U.getUser()); 3449 // Ignore non-instructions. 3450 if (!UserInst) 3451 continue; 3452 // Don't bother if the instruction is an EHPad. 3453 if (UserInst->isEHPad()) 3454 continue; 3455 // Ignore instructions in other functions (as can happen with 3456 // Constants). 3457 if (UserInst->getParent()->getParent() != L->getHeader()->getParent()) 3458 continue; 3459 // Ignore instructions not dominated by the loop. 3460 const BasicBlock *UseBB = !isa<PHINode>(UserInst) ? 3461 UserInst->getParent() : 3462 cast<PHINode>(UserInst)->getIncomingBlock( 3463 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 3464 if (!DT.dominates(L->getHeader(), UseBB)) 3465 continue; 3466 // Don't bother if the instruction is in a BB which ends in an EHPad. 3467 if (UseBB->getTerminator()->isEHPad()) 3468 continue; 3469 // Don't bother rewriting PHIs in catchswitch blocks. 3470 if (isa<CatchSwitchInst>(UserInst->getParent()->getTerminator())) 3471 continue; 3472 // Ignore uses which are part of other SCEV expressions, to avoid 3473 // analyzing them multiple times. 3474 if (SE.isSCEVable(UserInst->getType())) { 3475 const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst)); 3476 // If the user is a no-op, look through to its uses. 3477 if (!isa<SCEVUnknown>(UserS)) 3478 continue; 3479 if (UserS == US) { 3480 Worklist.push_back( 3481 SE.getUnknown(const_cast<Instruction *>(UserInst))); 3482 continue; 3483 } 3484 } 3485 // Ignore icmp instructions which are already being analyzed. 
3486 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) { 3487 unsigned OtherIdx = !U.getOperandNo(); 3488 Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx)); 3489 if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L)) 3490 continue; 3491 } 3492 3493 std::pair<size_t, int64_t> P = getUse( 3494 S, LSRUse::Basic, MemAccessTy()); 3495 size_t LUIdx = P.first; 3496 int64_t Offset = P.second; 3497 LSRUse &LU = Uses[LUIdx]; 3498 LSRFixup &LF = LU.getNewFixup(); 3499 LF.UserInst = const_cast<Instruction *>(UserInst); 3500 LF.OperandValToReplace = U; 3501 LF.Offset = Offset; 3502 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); 3503 if (!LU.WidestFixupType || 3504 SE.getTypeSizeInBits(LU.WidestFixupType) < 3505 SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) 3506 LU.WidestFixupType = LF.OperandValToReplace->getType(); 3507 InsertSupplementalFormula(US, LU, LUIdx); 3508 CountRegisters(LU.Formulae.back(), Uses.size() - 1); 3509 break; 3510 } 3511 } 3512 } 3513 } 3514 3515 /// Split S into subexpressions which can be pulled out into separate 3516 /// registers. If C is non-null, multiply each subexpression by C. 3517 /// 3518 /// Return remainder expression after factoring the subexpressions captured by 3519 /// Ops. If Ops is complete, return NULL. 3520 static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C, 3521 SmallVectorImpl<const SCEV *> &Ops, 3522 const Loop *L, 3523 ScalarEvolution &SE, 3524 unsigned Depth = 0) { 3525 // Arbitrarily cap recursion to protect compile time. 3526 if (Depth >= 3) 3527 return S; 3528 3529 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 3530 // Break out add operands. 3531 for (const SCEV *S : Add->operands()) { 3532 const SCEV *Remainder = CollectSubexprs(S, C, Ops, L, SE, Depth+1); 3533 if (Remainder) 3534 Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder); 3535 } 3536 return nullptr; 3537 } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { 3538 // Split a non-zero base out of an addrec. 3539 if (AR->getStart()->isZero() || !AR->isAffine()) 3540 return S; 3541 3542 const SCEV *Remainder = CollectSubexprs(AR->getStart(), 3543 C, Ops, L, SE, Depth+1); 3544 // Split the non-zero AddRec unless it is part of a nested recurrence that 3545 // does not pertain to this loop. 3546 if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) { 3547 Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder); 3548 Remainder = nullptr; 3549 } 3550 if (Remainder != AR->getStart()) { 3551 if (!Remainder) 3552 Remainder = SE.getConstant(AR->getType(), 0); 3553 return SE.getAddRecExpr(Remainder, 3554 AR->getStepRecurrence(SE), 3555 AR->getLoop(), 3556 //FIXME: AR->getNoWrapFlags(SCEV::FlagNW) 3557 SCEV::FlagAnyWrap); 3558 } 3559 } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 3560 // Break (C * (a + b + c)) into C*a + C*b + C*c. 3561 if (Mul->getNumOperands() != 2) 3562 return S; 3563 if (const SCEVConstant *Op0 = 3564 dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3565 C = C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0; 3566 const SCEV *Remainder = 3567 CollectSubexprs(Mul->getOperand(1), C, Ops, L, SE, Depth+1); 3568 if (Remainder) 3569 Ops.push_back(SE.getMulExpr(C, Remainder)); 3570 return nullptr; 3571 } 3572 } 3573 return S; 3574 } 3575 3576 /// Return true if the SCEV represents a value that may end up as a 3577 /// post-increment operation. 
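///
/// For example (illustrative), an address expression {%base,+,4}<%L> with a
/// constant step and a loop-invariant, non-constant start may be lowered as a
/// post-indexed access on targets where indexed loads or stores are legal.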
3578 static bool mayUsePostIncMode(const TargetTransformInfo &TTI, 3579 LSRUse &LU, const SCEV *S, const Loop *L, 3580 ScalarEvolution &SE) { 3581 if (LU.Kind != LSRUse::Address || 3582 !LU.AccessTy.getType()->isIntOrIntVectorTy()) 3583 return false; 3584 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S); 3585 if (!AR) 3586 return false; 3587 const SCEV *LoopStep = AR->getStepRecurrence(SE); 3588 if (!isa<SCEVConstant>(LoopStep)) 3589 return false; 3590 // Check if a post-indexed load/store can be used. 3591 if (TTI.isIndexedLoadLegal(TTI.MIM_PostInc, AR->getType()) || 3592 TTI.isIndexedStoreLegal(TTI.MIM_PostInc, AR->getType())) { 3593 const SCEV *LoopStart = AR->getStart(); 3594 if (!isa<SCEVConstant>(LoopStart) && SE.isLoopInvariant(LoopStart, L)) 3595 return true; 3596 } 3597 return false; 3598 } 3599 3600 /// Helper function for LSRInstance::GenerateReassociations. 3601 void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx, 3602 const Formula &Base, 3603 unsigned Depth, size_t Idx, 3604 bool IsScaledReg) { 3605 const SCEV *BaseReg = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx]; 3606 // Don't generate reassociations for the base register of a value that 3607 // may generate a post-increment operator. The reason is that the 3608 // reassociations cause extra base+register formula to be created, 3609 // and possibly chosen, but the post-increment is more efficient. 3610 if (AMK == TTI::AMK_PostIndexed && mayUsePostIncMode(TTI, LU, BaseReg, L, SE)) 3611 return; 3612 SmallVector<const SCEV *, 8> AddOps; 3613 const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE); 3614 if (Remainder) 3615 AddOps.push_back(Remainder); 3616 3617 if (AddOps.size() == 1) 3618 return; 3619 3620 for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(), 3621 JE = AddOps.end(); 3622 J != JE; ++J) { 3623 // Loop-variant "unknown" values are uninteresting; we won't be able to 3624 // do anything meaningful with them. 3625 if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L)) 3626 continue; 3627 3628 // Don't pull a constant into a register if the constant could be folded 3629 // into an immediate field. 3630 if (isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind, 3631 LU.AccessTy, *J, Base.getNumRegs() > 1)) 3632 continue; 3633 3634 // Collect all operands except *J. 3635 SmallVector<const SCEV *, 8> InnerAddOps( 3636 ((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J); 3637 InnerAddOps.append(std::next(J), 3638 ((const SmallVector<const SCEV *, 8> &)AddOps).end()); 3639 3640 // Don't leave just a constant behind in a register if the constant could 3641 // be folded into an immediate field. 3642 if (InnerAddOps.size() == 1 && 3643 isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind, 3644 LU.AccessTy, InnerAddOps[0], Base.getNumRegs() > 1)) 3645 continue; 3646 3647 const SCEV *InnerSum = SE.getAddExpr(InnerAddOps); 3648 if (InnerSum->isZero()) 3649 continue; 3650 Formula F = Base; 3651 3652 // Add the remaining pieces of the add back into the new formula. 
3653     const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum);
3654     if (InnerSumSC && SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 &&
3655         TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
3656                                 InnerSumSC->getValue()->getZExtValue())) {
3657       F.UnfoldedOffset =
3658           (uint64_t)F.UnfoldedOffset + InnerSumSC->getValue()->getZExtValue();
3659       if (IsScaledReg)
3660         F.ScaledReg = nullptr;
3661       else
3662         F.BaseRegs.erase(F.BaseRegs.begin() + Idx);
3663     } else if (IsScaledReg)
3664       F.ScaledReg = InnerSum;
3665     else
3666       F.BaseRegs[Idx] = InnerSum;
3667 
3668     // Add J as its own register, or an unfolded immediate.
3669     const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J);
3670     if (SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
3671         TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
3672                                 SC->getValue()->getZExtValue()))
3673       F.UnfoldedOffset =
3674           (uint64_t)F.UnfoldedOffset + SC->getValue()->getZExtValue();
3675     else
3676       F.BaseRegs.push_back(*J);
3677     // We may have changed the number of registers in the base regs; adjust
3678     // the formula accordingly.
3679     F.canonicalize(*L);
3680 
3681     if (InsertFormula(LU, LUIdx, F))
3682       // If that formula hadn't been seen before, recurse to find more like
3683       // it.
3684       // Bound the recursion by Depth + Log16(AddOps.size()), i.e.
3685       // Depth + (Log2_32(AddOps.size()) >> 2), because Depth alone is not
3686       // enough to bound compile time: every time AddOps.size() exceeds 16^x,
3687       // we add x to Depth.
3688       GenerateReassociations(LU, LUIdx, LU.Formulae.back(),
3689                              Depth + 1 + (Log2_32(AddOps.size()) >> 2));
3690   }
3691 }
3692 
3693 /// Split out subexpressions from adds and the bases of addrecs.
3694 void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
3695                                          Formula Base, unsigned Depth) {
3696   assert(Base.isCanonical(*L) && "Input must be in the canonical form");
3697   // Arbitrarily cap recursion to protect compile time.
3698   if (Depth >= 3)
3699     return;
3700 
3701   for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
3702     GenerateReassociationsImpl(LU, LUIdx, Base, Depth, i);
3703 
3704   if (Base.Scale == 1)
3705     GenerateReassociationsImpl(LU, LUIdx, Base, Depth,
3706                                /* Idx */ -1, /* IsScaledReg */ true);
3707 }
3708 
3709 /// Generate a formula consisting of all of the loop-dominating registers added
3710 /// into a single register.
3711 void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
3712                                        Formula Base) {
3713   // This method is only interesting on a plurality of registers.
3714   if (Base.BaseRegs.size() + (Base.Scale == 1) +
3715       (Base.UnfoldedOffset != 0) <= 1)
3716     return;
3717 
3718   // Flatten the representation, i.e., reg1 + 1*reg2 => reg1 + reg2, before
3719   // processing the formula.
3720   Base.unscale();
3721   SmallVector<const SCEV *, 4> Ops;
3722   Formula NewBase = Base;
3723   NewBase.BaseRegs.clear();
3724   Type *CombinedIntegerType = nullptr;
3725   for (const SCEV *BaseReg : Base.BaseRegs) {
3726     if (SE.properlyDominates(BaseReg, L->getHeader()) &&
3727         !SE.hasComputableLoopEvolution(BaseReg, L)) {
3728       if (!CombinedIntegerType)
3729         CombinedIntegerType = SE.getEffectiveSCEVType(BaseReg->getType());
3730       Ops.push_back(BaseReg);
3731     }
3732     else
3733       NewBase.BaseRegs.push_back(BaseReg);
3734   }
3735 
3736   // If no register is relevant, we're done.
3737   if (Ops.size() == 0)
3738     return;
3739 
3740   // Utility function for generating the required variants of the combined
3741   // registers.
3742 auto GenerateFormula = [&](const SCEV *Sum) { 3743 Formula F = NewBase; 3744 3745 // TODO: If Sum is zero, it probably means ScalarEvolution missed an 3746 // opportunity to fold something. For now, just ignore such cases 3747 // rather than proceed with zero in a register. 3748 if (Sum->isZero()) 3749 return; 3750 3751 F.BaseRegs.push_back(Sum); 3752 F.canonicalize(*L); 3753 (void)InsertFormula(LU, LUIdx, F); 3754 }; 3755 3756 // If we collected at least two registers, generate a formula combining them. 3757 if (Ops.size() > 1) { 3758 SmallVector<const SCEV *, 4> OpsCopy(Ops); // Don't let SE modify Ops. 3759 GenerateFormula(SE.getAddExpr(OpsCopy)); 3760 } 3761 3762 // If we have an unfolded offset, generate a formula combining it with the 3763 // registers collected. 3764 if (NewBase.UnfoldedOffset) { 3765 assert(CombinedIntegerType && "Missing a type for the unfolded offset"); 3766 Ops.push_back(SE.getConstant(CombinedIntegerType, NewBase.UnfoldedOffset, 3767 true)); 3768 NewBase.UnfoldedOffset = 0; 3769 GenerateFormula(SE.getAddExpr(Ops)); 3770 } 3771 } 3772 3773 /// Helper function for LSRInstance::GenerateSymbolicOffsets. 3774 void LSRInstance::GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx, 3775 const Formula &Base, size_t Idx, 3776 bool IsScaledReg) { 3777 const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx]; 3778 GlobalValue *GV = ExtractSymbol(G, SE); 3779 if (G->isZero() || !GV) 3780 return; 3781 Formula F = Base; 3782 F.BaseGV = GV; 3783 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) 3784 return; 3785 if (IsScaledReg) 3786 F.ScaledReg = G; 3787 else 3788 F.BaseRegs[Idx] = G; 3789 (void)InsertFormula(LU, LUIdx, F); 3790 } 3791 3792 /// Generate reuse formulae using symbolic offsets. 3793 void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, 3794 Formula Base) { 3795 // We can't add a symbolic offset if the address already contains one. 3796 if (Base.BaseGV) return; 3797 3798 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) 3799 GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, i); 3800 if (Base.Scale == 1) 3801 GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, /* Idx */ -1, 3802 /* IsScaledReg */ true); 3803 } 3804 3805 /// Helper function for LSRInstance::GenerateConstantOffsets. 3806 void LSRInstance::GenerateConstantOffsetsImpl( 3807 LSRUse &LU, unsigned LUIdx, const Formula &Base, 3808 const SmallVectorImpl<int64_t> &Worklist, size_t Idx, bool IsScaledReg) { 3809 3810 auto GenerateOffset = [&](const SCEV *G, int64_t Offset) { 3811 Formula F = Base; 3812 F.BaseOffset = (uint64_t)Base.BaseOffset - Offset; 3813 3814 if (isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) { 3815 // Add the offset to the base register. 3816 const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), Offset), G); 3817 // If it cancelled out, drop the base register, otherwise update it. 3818 if (NewG->isZero()) { 3819 if (IsScaledReg) { 3820 F.Scale = 0; 3821 F.ScaledReg = nullptr; 3822 } else 3823 F.deleteBaseReg(F.BaseRegs[Idx]); 3824 F.canonicalize(*L); 3825 } else if (IsScaledReg) 3826 F.ScaledReg = NewG; 3827 else 3828 F.BaseRegs[Idx] = NewG; 3829 3830 (void)InsertFormula(LU, LUIdx, F); 3831 } 3832 }; 3833 3834 const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx]; 3835 3836 // With constant offsets and constant steps, we can generate pre-inc 3837 // accesses by having the offset equal the step. 
So, for access #0 with a
3838   // step of 8, we generate a G - 8 base which would require the first access
3839   // to be ((G - 8) + 8),+,8. The pre-indexed access then updates the pointer
3840   // for itself and hopefully becomes the base for other accesses. This means
3841   // that a single pre-indexed access can be generated to become the new
3842   // base pointer for each iteration of the loop, resulting in no extra add/sub
3843   // instructions for pointer updating.
3844   if (AMK == TTI::AMK_PreIndexed && LU.Kind == LSRUse::Address) {
3845     if (auto *GAR = dyn_cast<SCEVAddRecExpr>(G)) {
3846       if (auto *StepRec =
3847               dyn_cast<SCEVConstant>(GAR->getStepRecurrence(SE))) {
3848         const APInt &StepInt = StepRec->getAPInt();
3849         int64_t Step = StepInt.isNegative() ?
3850             StepInt.getSExtValue() : StepInt.getZExtValue();
3851 
3852         for (int64_t Offset : Worklist) {
3853           Offset -= Step;
3854           GenerateOffset(G, Offset);
3855         }
3856       }
3857     }
3858   }
3859   for (int64_t Offset : Worklist)
3860     GenerateOffset(G, Offset);
3861 
3862   int64_t Imm = ExtractImmediate(G, SE);
3863   if (G->isZero() || Imm == 0)
3864     return;
3865   Formula F = Base;
3866   F.BaseOffset = (uint64_t)F.BaseOffset + Imm;
3867   if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
3868     return;
3869   if (IsScaledReg) {
3870     F.ScaledReg = G;
3871   } else {
3872     F.BaseRegs[Idx] = G;
3873     // We may generate a non-canonical Formula if G is a recurrent expression
3874     // register related to the current loop while F.ScaledReg is not.
3875     F.canonicalize(*L);
3876   }
3877   (void)InsertFormula(LU, LUIdx, F);
3878 }
3879 
3880 /// Generate reuse formulae using constant offsets.
3881 void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
3882                                           Formula Base) {
3883   // TODO: For now, just add the min and max offset, because it usually isn't
3884   // worthwhile looking at everything in between.
3885   SmallVector<int64_t, 2> Worklist;
3886   Worklist.push_back(LU.MinOffset);
3887   if (LU.MaxOffset != LU.MinOffset)
3888     Worklist.push_back(LU.MaxOffset);
3889 
3890   for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
3891     GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, i);
3892   if (Base.Scale == 1)
3893     GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, /* Idx */ -1,
3894                                 /* IsScaledReg */ true);
3895 }
3896 
3897 /// For ICmpZero, check to see if we can scale up the comparison. For example,
3898 /// x == y -> x*c == y*c.
3899 void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
3900                                          Formula Base) {
3901   if (LU.Kind != LSRUse::ICmpZero) return;
3902 
3903   // Determine the integer type for the base formula.
3904   Type *IntTy = Base.getType();
3905   if (!IntTy) return;
3906   if (SE.getTypeSizeInBits(IntTy) > 64) return;
3907 
3908   // Don't do this if there is more than one offset.
3909   if (LU.MinOffset != LU.MaxOffset) return;
3910 
3911   // Check if the transformation is valid. It is illegal to multiply a pointer.
3912   if (Base.ScaledReg && Base.ScaledReg->getType()->isPointerTy())
3913     return;
3914   for (const SCEV *BaseReg : Base.BaseRegs)
3915     if (BaseReg->getType()->isPointerTy())
3916       return;
3917   assert(!Base.BaseGV && "ICmpZero use is not legal!");
3918 
3919   // Check each interesting stride.
3920   for (int64_t Factor : Factors) {
3921     // Check that the multiplication doesn't overflow.
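    // For example, INT64_MIN * -1 is not representable in int64_t (and
    // INT64_MIN / -1 in the check below would be UB), so that pair is
    // screened out first; the division check catches any other wraparound.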
3922     if (Base.BaseOffset == std::numeric_limits<int64_t>::min() && Factor == -1)
3923       continue;
3924     int64_t NewBaseOffset = (uint64_t)Base.BaseOffset * Factor;
3925     assert(Factor != 0 && "Zero factor not expected!");
3926     if (NewBaseOffset / Factor != Base.BaseOffset)
3927       continue;
3928     // If the offset will be truncated at this use, check that it is in bounds.
3929     if (!IntTy->isPointerTy() &&
3930         !ConstantInt::isValueValidForType(IntTy, NewBaseOffset))
3931       continue;
3932 
3933     // Check that multiplying with the use offset doesn't overflow.
3934     int64_t Offset = LU.MinOffset;
3935     if (Offset == std::numeric_limits<int64_t>::min() && Factor == -1)
3936       continue;
3937     Offset = (uint64_t)Offset * Factor;
3938     if (Offset / Factor != LU.MinOffset)
3939       continue;
3940     // If the offset will be truncated at this use, check that it is in bounds.
3941     if (!IntTy->isPointerTy() &&
3942         !ConstantInt::isValueValidForType(IntTy, Offset))
3943       continue;
3944 
3945     Formula F = Base;
3946     F.BaseOffset = NewBaseOffset;
3947 
3948     // Check that this scale is legal.
3949     if (!isLegalUse(TTI, Offset, Offset, LU.Kind, LU.AccessTy, F))
3950       continue;
3951 
3952     // Compensate for the use having MinOffset built into it.
3953     F.BaseOffset = (uint64_t)F.BaseOffset + Offset - LU.MinOffset;
3954 
3955     const SCEV *FactorS = SE.getConstant(IntTy, Factor);
3956 
3957     // Check that multiplying with each base register doesn't overflow.
3958     for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
3959       F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
3960       if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
3961         goto next;
3962     }
3963 
3964     // Check that multiplying with the scaled register doesn't overflow.
3965     if (F.ScaledReg) {
3966       F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
3967       if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
3968         continue;
3969     }
3970 
3971     // Check that multiplying with the unfolded offset doesn't overflow.
3972     if (F.UnfoldedOffset != 0) {
3973       if (F.UnfoldedOffset == std::numeric_limits<int64_t>::min() &&
3974           Factor == -1)
3975         continue;
3976       F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor;
3977       if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset)
3978         continue;
3979       // If the offset will be truncated, check that it is in bounds.
3980       if (!IntTy->isPointerTy() &&
3981           !ConstantInt::isValueValidForType(IntTy, F.UnfoldedOffset))
3982         continue;
3983     }
3984 
3985     // If we make it here and it's legal, add it.
3986     (void)InsertFormula(LU, LUIdx, F);
3987   next:;
3988   }
3989 }
3990 
3991 /// Generate stride factor reuse formulae by making use of scaled-offset
3992 /// address modes, for example.
3993 void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
3994   // Determine the integer type for the base formula.
3995   Type *IntTy = Base.getType();
3996   if (!IntTy) return;
3997 
3998   // If this Formula already has a scaled register, we can't add another one.
3999   // Try to unscale the formula to generate a better scale.
4000   if (Base.Scale != 0 && !Base.unscale())
4001     return;
4002 
4003   assert(Base.Scale == 0 && "unscale did not do its job!");
4004 
4005   // Check each interesting stride.
4006   for (int64_t Factor : Factors) {
4007     Base.Scale = Factor;
4008     Base.HasBaseReg = Base.BaseRegs.size() > 1;
4009     // Check whether this scale is going to be legal.
4010     if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
4011                     Base)) {
4012       // As a special case, convert out-of-loop Basic uses to the Special kind.
4013       // TODO: Reconsider this special case.
4014       if (LU.Kind == LSRUse::Basic &&
4015           isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LSRUse::Special,
4016                      LU.AccessTy, Base) &&
4017           LU.AllFixupsOutsideLoop)
4018         LU.Kind = LSRUse::Special;
4019       else
4020         continue;
4021     }
4022     // For an ICmpZero, negating a solitary base register won't lead to
4023     // new solutions.
4024     if (LU.Kind == LSRUse::ICmpZero &&
4025         !Base.HasBaseReg && Base.BaseOffset == 0 && !Base.BaseGV)
4026       continue;
4027     // For each addrec base reg, if its loop is the current loop, apply the
4028     // scale.
4029     for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
4030       const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i]);
4031       if (AR && (AR->getLoop() == L || LU.AllFixupsOutsideLoop)) {
4032         const SCEV *FactorS = SE.getConstant(IntTy, Factor);
4033         if (FactorS->isZero())
4034           continue;
4035         // Divide out the factor, ignoring high bits, since we'll be
4036         // scaling the value back up in the end.
4037         if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) {
4038           // TODO: This could be optimized to avoid all the copying.
4039           Formula F = Base;
4040           F.ScaledReg = Quotient;
4041           F.deleteBaseReg(F.BaseRegs[i]);
4042           // The canonical representation of 1*reg is reg, which is already in
4043           // Base. In that case, do not try to insert the formula; it will be
4044           // rejected anyway.
4045           if (F.Scale == 1 && (F.BaseRegs.empty() ||
4046                                (AR->getLoop() != L && LU.AllFixupsOutsideLoop)))
4047             continue;
4048           // If AllFixupsOutsideLoop is true and F.Scale is 1, we may generate
4049           // a non-canonical Formula whose ScaledReg's loop is not L.
4050           if (F.Scale == 1 && LU.AllFixupsOutsideLoop)
4051             F.canonicalize(*L);
4052           (void)InsertFormula(LU, LUIdx, F);
4053         }
4054       }
4055     }
4056   }
4057 }
4058 
4059 /// Generate reuse formulae from different IV types.
4060 void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
4061   // Don't bother truncating symbolic values.
4062   if (Base.BaseGV) return;
4063 
4064   // Determine the integer type for the base formula.
4065   Type *DstTy = Base.getType();
4066   if (!DstTy) return;
4067   DstTy = SE.getEffectiveSCEVType(DstTy);
4068 
4069   for (Type *SrcTy : Types) {
4070     if (SrcTy != DstTy && TTI.isTruncateFree(SrcTy, DstTy)) {
4071       Formula F = Base;
4072 
4073       // Sometimes SCEV is able to prove zero during ext transform. This may
4074       // happen if SCEV did not do all possible transforms while creating the
4075       // initial node (perhaps due to depth limitations), but it can do them
4076       // when taking the extension.
4077       if (F.ScaledReg) {
4078         const SCEV *NewScaledReg = SE.getAnyExtendExpr(F.ScaledReg, SrcTy);
4079         if (NewScaledReg->isZero())
4080           continue;
4081         F.ScaledReg = NewScaledReg;
4082       }
4083       bool HasZeroBaseReg = false;
4084       for (const SCEV *&BaseReg : F.BaseRegs) {
4085         const SCEV *NewBaseReg = SE.getAnyExtendExpr(BaseReg, SrcTy);
4086         if (NewBaseReg->isZero()) {
4087           HasZeroBaseReg = true;
4088           break;
4089         }
4090         BaseReg = NewBaseReg;
4091       }
4092       if (HasZeroBaseReg)
4093         continue;
4094 
4095       // TODO: This assumes we've done basic processing on all uses and
4096       // have an idea what the register usage is.
4097       if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses))
4098         continue;
4099 
4100       F.canonicalize(*L);
4101       (void)InsertFormula(LU, LUIdx, F);
4102     }
4103   }
4104 }
4105 
4106 namespace {
4107 
4108 /// Helper class for GenerateCrossUseConstantOffsets. It's used to defer
/// modifications so that the search phase doesn't have to worry about the data
/// structures moving underneath it.
4110 struct WorkItem {
4111   size_t LUIdx;
4112   int64_t Imm;
4113   const SCEV *OrigReg;
4114 
4115   WorkItem(size_t LI, int64_t I, const SCEV *R)
4116       : LUIdx(LI), Imm(I), OrigReg(R) {}
4117 
4118   void print(raw_ostream &OS) const;
4119   void dump() const;
4120 };
4121 
4122 } // end anonymous namespace
4123 
4124 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4125 void WorkItem::print(raw_ostream &OS) const {
4126   OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
4127      << " , add offset " << Imm;
4128 }
4129 
4130 LLVM_DUMP_METHOD void WorkItem::dump() const {
4131   print(errs()); errs() << '\n';
4132 }
4133 #endif
4134 
4135 /// Look for registers which are a constant distance apart and try to form
4136 /// reuse opportunities between them.
4137 void LSRInstance::GenerateCrossUseConstantOffsets() {
4138   // Group the registers by their value without any added constant offset.
4139   using ImmMapTy = std::map<int64_t, const SCEV *>;
4140 
4141   DenseMap<const SCEV *, ImmMapTy> Map;
4142   DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
4143   SmallVector<const SCEV *, 8> Sequence;
4144   for (const SCEV *Use : RegUses) {
4145     const SCEV *Reg = Use; // Make a copy for ExtractImmediate to modify.
4146     int64_t Imm = ExtractImmediate(Reg, SE);
4147     auto Pair = Map.insert(std::make_pair(Reg, ImmMapTy()));
4148     if (Pair.second)
4149       Sequence.push_back(Reg);
4150     Pair.first->second.insert(std::make_pair(Imm, Use));
4151     UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(Use);
4152   }
4153 
4154   // Now examine each set of registers with the same base value. Build up
4155   // a list of work to do and do the work in a separate step so that we're
4156   // not adding formulae and register counts while we're searching.
4157   SmallVector<WorkItem, 32> WorkItems;
4158   SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
4159   for (const SCEV *Reg : Sequence) {
4160     const ImmMapTy &Imms = Map.find(Reg)->second;
4161 
4162     // It's not worthwhile looking for reuse if there's only one offset.
4163     if (Imms.size() == 1)
4164       continue;
4165 
4166     LLVM_DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
4167                for (const auto &Entry : Imms)
4168                  dbgs() << ' ' << Entry.first;
4169                dbgs() << '\n');
4170 
4171     // Examine each offset.
4172     for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
4173          J != JE; ++J) {
4174       const SCEV *OrigReg = J->second;
4175 
4176       int64_t JImm = J->first;
4177       const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);
4178 
4179       if (!isa<SCEVConstant>(OrigReg) &&
4180           UsedByIndicesMap[Reg].count() == 1) {
4181         LLVM_DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg
4182                           << '\n');
4183         continue;
4184       }
4185 
4186       // Conservatively examine offsets between this orig reg and a few
4187       // selected other orig regs.
4188       int64_t First = Imms.begin()->first;
4189       int64_t Last = std::prev(Imms.end())->first;
4190       // Compute (First + Last) / 2 without overflow using the fact that
4191       // First + Last = 2 * (First & Last) + (First ^ Last).
4192       int64_t Avg = (First & Last) + ((First ^ Last) >> 1);
4193       // If the result is negative and First is odd and Last even (or vice
4194       // versa), we rounded towards -inf. Add 1 in that case, to round towards
4195       // 0.
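      // For example, with First = -7 and Last = 2: (First & Last) == 0 and
      // (First ^ Last) == -5, so Avg = 0 + (-5 >> 1) = -3. The correction
      // below adds bit 0 of (First ^ Last), which is 1, because Avg is
      // negative, yielding -2 == (-7 + 2) / 2 with truncating division.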
4196 Avg = Avg + ((First ^ Last) & ((uint64_t)Avg >> 63)); 4197 ImmMapTy::const_iterator OtherImms[] = { 4198 Imms.begin(), std::prev(Imms.end()), 4199 Imms.lower_bound(Avg)}; 4200 for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) { 4201 ImmMapTy::const_iterator M = OtherImms[i]; 4202 if (M == J || M == JE) continue; 4203 4204 // Compute the difference between the two. 4205 int64_t Imm = (uint64_t)JImm - M->first; 4206 for (unsigned LUIdx : UsedByIndices.set_bits()) 4207 // Make a memo of this use, offset, and register tuple. 4208 if (UniqueItems.insert(std::make_pair(LUIdx, Imm)).second) 4209 WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg)); 4210 } 4211 } 4212 } 4213 4214 Map.clear(); 4215 Sequence.clear(); 4216 UsedByIndicesMap.clear(); 4217 UniqueItems.clear(); 4218 4219 // Now iterate through the worklist and add new formulae. 4220 for (const WorkItem &WI : WorkItems) { 4221 size_t LUIdx = WI.LUIdx; 4222 LSRUse &LU = Uses[LUIdx]; 4223 int64_t Imm = WI.Imm; 4224 const SCEV *OrigReg = WI.OrigReg; 4225 4226 Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType()); 4227 const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm)); 4228 unsigned BitWidth = SE.getTypeSizeInBits(IntTy); 4229 4230 // TODO: Use a more targeted data structure. 4231 for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) { 4232 Formula F = LU.Formulae[L]; 4233 // FIXME: The code for the scaled and unscaled registers looks 4234 // very similar but slightly different. Investigate if they 4235 // could be merged. That way, we would not have to unscale the 4236 // Formula. 4237 F.unscale(); 4238 // Use the immediate in the scaled register. 4239 if (F.ScaledReg == OrigReg) { 4240 int64_t Offset = (uint64_t)F.BaseOffset + Imm * (uint64_t)F.Scale; 4241 // Don't create 50 + reg(-50). 4242 if (F.referencesReg(SE.getSCEV( 4243 ConstantInt::get(IntTy, -(uint64_t)Offset)))) 4244 continue; 4245 Formula NewF = F; 4246 NewF.BaseOffset = Offset; 4247 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, 4248 NewF)) 4249 continue; 4250 NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg); 4251 4252 // If the new scale is a constant in a register, and adding the constant 4253 // value to the immediate would produce a value closer to zero than the 4254 // immediate itself, then the formula isn't worthwhile. 4255 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg)) 4256 if (C->getValue()->isNegative() != (NewF.BaseOffset < 0) && 4257 (C->getAPInt().abs() * APInt(BitWidth, F.Scale)) 4258 .ule(std::abs(NewF.BaseOffset))) 4259 continue; 4260 4261 // OK, looks good. 4262 NewF.canonicalize(*this->L); 4263 (void)InsertFormula(LU, LUIdx, NewF); 4264 } else { 4265 // Use the immediate in a base register. 
4266 for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) { 4267 const SCEV *BaseReg = F.BaseRegs[N]; 4268 if (BaseReg != OrigReg) 4269 continue; 4270 Formula NewF = F; 4271 NewF.BaseOffset = (uint64_t)NewF.BaseOffset + Imm; 4272 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, 4273 LU.Kind, LU.AccessTy, NewF)) { 4274 if (AMK == TTI::AMK_PostIndexed && 4275 mayUsePostIncMode(TTI, LU, OrigReg, this->L, SE)) 4276 continue; 4277 if (!TTI.isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm)) 4278 continue; 4279 NewF = F; 4280 NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm; 4281 } 4282 NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg); 4283 4284 // If the new formula has a constant in a register, and adding the 4285 // constant value to the immediate would produce a value closer to 4286 // zero than the immediate itself, then the formula isn't worthwhile. 4287 for (const SCEV *NewReg : NewF.BaseRegs) 4288 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewReg)) 4289 if ((C->getAPInt() + NewF.BaseOffset) 4290 .abs() 4291 .slt(std::abs(NewF.BaseOffset)) && 4292 (C->getAPInt() + NewF.BaseOffset).countTrailingZeros() >= 4293 countTrailingZeros<uint64_t>(NewF.BaseOffset)) 4294 goto skip_formula; 4295 4296 // Ok, looks good. 4297 NewF.canonicalize(*this->L); 4298 (void)InsertFormula(LU, LUIdx, NewF); 4299 break; 4300 skip_formula:; 4301 } 4302 } 4303 } 4304 } 4305 } 4306 4307 /// Generate formulae for each use. 4308 void 4309 LSRInstance::GenerateAllReuseFormulae() { 4310 // This is split into multiple loops so that hasRegsUsedByUsesOtherThan 4311 // queries are more precise. 4312 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 4313 LSRUse &LU = Uses[LUIdx]; 4314 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 4315 GenerateReassociations(LU, LUIdx, LU.Formulae[i]); 4316 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 4317 GenerateCombinations(LU, LUIdx, LU.Formulae[i]); 4318 } 4319 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 4320 LSRUse &LU = Uses[LUIdx]; 4321 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 4322 GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]); 4323 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 4324 GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]); 4325 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 4326 GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]); 4327 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 4328 GenerateScales(LU, LUIdx, LU.Formulae[i]); 4329 } 4330 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 4331 LSRUse &LU = Uses[LUIdx]; 4332 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 4333 GenerateTruncates(LU, LUIdx, LU.Formulae[i]); 4334 } 4335 4336 GenerateCrossUseConstantOffsets(); 4337 4338 LLVM_DEBUG(dbgs() << "\n" 4339 "After generating reuse formulae:\n"; 4340 print_uses(dbgs())); 4341 } 4342 4343 /// If there are multiple formulae with the same set of registers used 4344 /// by other uses, pick the best one and delete the others. 4345 void LSRInstance::FilterOutUndesirableDedicatedRegisters() { 4346 DenseSet<const SCEV *> VisitedRegs; 4347 SmallPtrSet<const SCEV *, 16> Regs; 4348 SmallPtrSet<const SCEV *, 16> LoserRegs; 4349 #ifndef NDEBUG 4350 bool ChangedFormulae = false; 4351 #endif 4352 4353 // Collect the best formula for each unique set of shared registers. This 4354 // is reset for each use. 
4355   using BestFormulaeTy =
4356       DenseMap<SmallVector<const SCEV *, 4>, size_t, UniquifierDenseMapInfo>;
4357 
4358   BestFormulaeTy BestFormulae;
4359 
4360   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4361     LSRUse &LU = Uses[LUIdx];
4362     LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs());
4363                dbgs() << '\n');
4364 
4365     bool Any = false;
4366     for (size_t FIdx = 0, NumForms = LU.Formulae.size();
4367          FIdx != NumForms; ++FIdx) {
4368       Formula &F = LU.Formulae[FIdx];
4369 
4370       // Some formulae are instant losers. For example, they may depend on
4371       // nonexistent AddRecs from other loops. These need to be filtered
4372       // immediately, otherwise heuristics could choose them over others,
4373       // leading to an unsatisfactory solution. Passing LoserRegs into
4374       // RateFormula here avoids the need to recompute this information across
4375       // formulae using the same bad AddRec. Passing LoserRegs is also
4376       // essential unless we remove the corresponding bad register from the Regs set.
4377       Cost CostF(L, SE, TTI, AMK);
4378       Regs.clear();
4379       CostF.RateFormula(F, Regs, VisitedRegs, LU, &LoserRegs);
4380       if (CostF.isLoser()) {
4381         // During initial formula generation, undesirable formulae are generated
4382         // by uses within other loops that have some non-trivial address mode or
4383         // use the postinc form of the IV. LSR needs to provide these formulae
4384         // as the basis of rediscovering the desired formula that uses an AddRec
4385         // corresponding to the existing phi. Once all formulae have been
4386         // generated, these initial losers may be pruned.
4387         LLVM_DEBUG(dbgs() << "  Filtering loser "; F.print(dbgs());
4388                    dbgs() << "\n");
4389       }
4390       else {
4391         SmallVector<const SCEV *, 4> Key;
4392         for (const SCEV *Reg : F.BaseRegs) {
4393           if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
4394             Key.push_back(Reg);
4395         }
4396         if (F.ScaledReg &&
4397             RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
4398           Key.push_back(F.ScaledReg);
4399         // Unstable sort by host order is OK, because this is only used for
4400         // uniquifying.
4401         llvm::sort(Key);
4402 
4403         std::pair<BestFormulaeTy::const_iterator, bool> P =
4404             BestFormulae.insert(std::make_pair(Key, FIdx));
4405         if (P.second)
4406           continue;
4407 
4408         Formula &Best = LU.Formulae[P.first->second];
4409 
4410         Cost CostBest(L, SE, TTI, AMK);
4411         Regs.clear();
4412         CostBest.RateFormula(Best, Regs, VisitedRegs, LU);
4413         if (CostF.isLess(CostBest))
4414           std::swap(F, Best);
4415         LLVM_DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
4416                    dbgs() << "\n"
4417                              "    in favor of formula ";
4418                    Best.print(dbgs()); dbgs() << '\n');
4419       }
4420 #ifndef NDEBUG
4421       ChangedFormulae = true;
4422 #endif
4423       LU.DeleteFormula(F);
4424       --FIdx;
4425       --NumForms;
4426       Any = true;
4427     }
4428 
4429     // Now that we've filtered out some formulae, recompute the Regs set.
4430     if (Any)
4431       LU.RecomputeRegs(LUIdx, RegUses);
4432 
4433     // Reset this to prepare for the next use.
4434     BestFormulae.clear();
4435   }
4436 
4437   LLVM_DEBUG(if (ChangedFormulae) {
4438     dbgs() << "\n"
4439               "After filtering out undesirable candidates:\n";
4440     print_uses(dbgs());
4441   });
4442 }
4443 
4444 /// Estimate the worst-case number of solutions the solver might have to
4445 /// consider. It almost never considers this many solutions because it prunes
4446 /// the search space, but the pruning isn't always sufficient.
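/// For example (illustrative), three uses with 5, 4, and 6 formulae give a
/// worst case of 5 * 4 * 6 = 120 candidate solutions; the running product is
/// clamped to ComplexityLimit once it reaches that bound.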
4447 size_t LSRInstance::EstimateSearchSpaceComplexity() const { 4448 size_t Power = 1; 4449 for (const LSRUse &LU : Uses) { 4450 size_t FSize = LU.Formulae.size(); 4451 if (FSize >= ComplexityLimit) { 4452 Power = ComplexityLimit; 4453 break; 4454 } 4455 Power *= FSize; 4456 if (Power >= ComplexityLimit) 4457 break; 4458 } 4459 return Power; 4460 } 4461 4462 /// When one formula uses a superset of the registers of another formula, it 4463 /// won't help reduce register pressure (though it may not necessarily hurt 4464 /// register pressure); remove it to simplify the system. 4465 void LSRInstance::NarrowSearchSpaceByDetectingSupersets() { 4466 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { 4467 LLVM_DEBUG(dbgs() << "The search space is too complex.\n"); 4468 4469 LLVM_DEBUG(dbgs() << "Narrowing the search space by eliminating formulae " 4470 "which use a superset of registers used by other " 4471 "formulae.\n"); 4472 4473 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 4474 LSRUse &LU = Uses[LUIdx]; 4475 bool Any = false; 4476 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { 4477 Formula &F = LU.Formulae[i]; 4478 // Look for a formula with a constant or GV in a register. If the use 4479 // also has a formula with that same value in an immediate field, 4480 // delete the one that uses a register. 4481 for (SmallVectorImpl<const SCEV *>::const_iterator 4482 I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) { 4483 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) { 4484 Formula NewF = F; 4485 //FIXME: Formulas should store bitwidth to do wrapping properly. 4486 // See PR41034. 4487 NewF.BaseOffset += (uint64_t)C->getValue()->getSExtValue(); 4488 NewF.BaseRegs.erase(NewF.BaseRegs.begin() + 4489 (I - F.BaseRegs.begin())); 4490 if (LU.HasFormulaWithSameRegs(NewF)) { 4491 LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs()); 4492 dbgs() << '\n'); 4493 LU.DeleteFormula(F); 4494 --i; 4495 --e; 4496 Any = true; 4497 break; 4498 } 4499 } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) { 4500 if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) 4501 if (!F.BaseGV) { 4502 Formula NewF = F; 4503 NewF.BaseGV = GV; 4504 NewF.BaseRegs.erase(NewF.BaseRegs.begin() + 4505 (I - F.BaseRegs.begin())); 4506 if (LU.HasFormulaWithSameRegs(NewF)) { 4507 LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs()); 4508 dbgs() << '\n'); 4509 LU.DeleteFormula(F); 4510 --i; 4511 --e; 4512 Any = true; 4513 break; 4514 } 4515 } 4516 } 4517 } 4518 } 4519 if (Any) 4520 LU.RecomputeRegs(LUIdx, RegUses); 4521 } 4522 4523 LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); 4524 } 4525 } 4526 4527 /// When there are many registers for expressions like A, A+1, A+2, etc., 4528 /// allocate a single register for them. 4529 void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() { 4530 if (EstimateSearchSpaceComplexity() < ComplexityLimit) 4531 return; 4532 4533 LLVM_DEBUG( 4534 dbgs() << "The search space is too complex.\n" 4535 "Narrowing the search space by assuming that uses separated " 4536 "by a constant offset will use the same registers.\n"); 4537 4538 // This is especially useful for unrolled loops. 
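  // For example (illustrative), a loop unrolled by four typically leaves
  // address uses A, A+8, A+16, and A+24; folding the offset uses into the
  // use of A lets +8/+16/+24 live as immediates in the addressing modes
  // rather than in separate registers.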
4539 
4540   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4541     LSRUse &LU = Uses[LUIdx];
4542     for (const Formula &F : LU.Formulae) {
4543       if (F.BaseOffset == 0 || (F.Scale != 0 && F.Scale != 1))
4544         continue;
4545 
4546       LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU);
4547       if (!LUThatHas)
4548         continue;
4549 
4550       if (!reconcileNewOffset(*LUThatHas, F.BaseOffset, /*HasBaseReg=*/ false,
4551                               LU.Kind, LU.AccessTy))
4552         continue;
4553 
4554       LLVM_DEBUG(dbgs() << "  Deleting use "; LU.print(dbgs()); dbgs() << '\n');
4555 
4556       LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;
4557 
4558       // Transfer the fixups of LU to LUThatHas.
4559       for (LSRFixup &Fixup : LU.Fixups) {
4560         Fixup.Offset += F.BaseOffset;
4561         LUThatHas->pushFixup(Fixup);
4562         LLVM_DEBUG(dbgs() << "New fixup has offset " << Fixup.Offset << '\n');
4563       }
4564 
4565       // Delete formulae from the new use which are no longer legal.
4566       bool Any = false;
4567       for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) {
4568         Formula &F = LUThatHas->Formulae[i];
4569         if (!isLegalUse(TTI, LUThatHas->MinOffset, LUThatHas->MaxOffset,
4570                         LUThatHas->Kind, LUThatHas->AccessTy, F)) {
4571           LLVM_DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
4572           LUThatHas->DeleteFormula(F);
4573           --i;
4574           --e;
4575           Any = true;
4576         }
4577       }
4578 
4579       if (Any)
4580         LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses);
4581 
4582       // Delete the old use.
4583       DeleteUse(LU, LUIdx);
4584       --LUIdx;
4585       --NumUses;
4586       break;
4587     }
4588   }
4589 
4590   LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4591 }
4592 
4593 /// Call FilterOutUndesirableDedicatedRegisters again, if necessary, now that
4594 /// we've done more filtering, as it may be able to find more formulae to
4595 /// eliminate.
4596 void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
4597   if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
4598     LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
4599 
4600     LLVM_DEBUG(dbgs() << "Narrowing the search space by re-filtering out "
4601                          "undesirable dedicated registers.\n");
4602 
4603     FilterOutUndesirableDedicatedRegisters();
4604 
4605     LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4606   }
4607 }
4608 
4609 /// If an LSRUse has multiple formulae with the same ScaledReg and Scale,
4610 /// pick the best one and delete the others.
4611 /// This narrowing heuristic is to keep as many formulae with different
4612 /// Scale and ScaledReg pairs as possible while narrowing the search space.
4613 /// The benefit is that a set of formulae with more Scale and ScaledReg
4614 /// variations is more likely to yield a better solution than a set in
4615 /// which they are all the same. The winner-picking register heuristic
4616 /// would often keep only the formulae with the same Scale and ScaledReg
4617 /// and filter out the others, and we want to avoid that if possible.
4618 void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() {
4619   if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4620     return;
4621 
4622   LLVM_DEBUG(
4623       dbgs() << "The search space is too complex.\n"
4624                 "Narrowing the search space by choosing the best Formula "
4625                 "from the Formulae with the same Scale and ScaledReg.\n");
4626 
4627   // Map the "Scale * ScaledReg" pair to the best formula of current LSRUse.
  using BestFormulaeTy = DenseMap<std::pair<const SCEV *, int64_t>, size_t>;

  BestFormulaeTy BestFormulae;
#ifndef NDEBUG
  bool ChangedFormulae = false;
#endif
  DenseSet<const SCEV *> VisitedRegs;
  SmallPtrSet<const SCEV *, 16> Regs;

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs());
               dbgs() << '\n');

    // Return true if Formula FA is better than Formula FB.
    auto IsBetterThan = [&](Formula &FA, Formula &FB) {
      // First we will try to choose the Formula with fewer new registers.
      // For a register used by the current Formula, the more that register
      // is shared among LSRUses, the less it increases the register-count
      // estimate for the formula.
      size_t FARegNum = 0;
      for (const SCEV *Reg : FA.BaseRegs) {
        const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg);
        FARegNum += (NumUses - UsedByIndices.count() + 1);
      }
      size_t FBRegNum = 0;
      for (const SCEV *Reg : FB.BaseRegs) {
        const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg);
        FBRegNum += (NumUses - UsedByIndices.count() + 1);
      }
      if (FARegNum != FBRegNum)
        return FARegNum < FBRegNum;

      // If the new register counts are the same, choose the Formula with
      // the lower Cost.
      Cost CostFA(L, SE, TTI, AMK);
      Cost CostFB(L, SE, TTI, AMK);
      Regs.clear();
      CostFA.RateFormula(FA, Regs, VisitedRegs, LU);
      Regs.clear();
      CostFB.RateFormula(FB, Regs, VisitedRegs, LU);
      return CostFA.isLess(CostFB);
    };

    bool Any = false;
    for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms;
         ++FIdx) {
      Formula &F = LU.Formulae[FIdx];
      if (!F.ScaledReg)
        continue;
      auto P = BestFormulae.insert({{F.ScaledReg, F.Scale}, FIdx});
      if (P.second)
        continue;

      Formula &Best = LU.Formulae[P.first->second];
      if (IsBetterThan(F, Best))
        std::swap(F, Best);
      LLVM_DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
                 dbgs() << "\n"
                           "    in favor of formula ";
                 Best.print(dbgs()); dbgs() << '\n');
#ifndef NDEBUG
      ChangedFormulae = true;
#endif
      LU.DeleteFormula(F);
      --FIdx;
      --NumForms;
      Any = true;
    }
    if (Any)
      LU.RecomputeRegs(LUIdx, RegUses);

    // Reset this to prepare for the next use.
    BestFormulae.clear();
  }

  LLVM_DEBUG(if (ChangedFormulae) {
    dbgs() << "\n"
              "After filtering out undesirable candidates:\n";
    print_uses(dbgs());
  });
}

/// If we are over the complexity limit, filter any post-inc-preferring
/// address uses down to their lowest-register formulae.
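/// For instance (a sketch, assuming a target with legal post-indexed loads
/// and stores): an address use whose formulae need 1, 2, and 3 registers
/// respectively would keep only the 1-register formula, since post-increment
/// addressing folds the step into the memory access and the extra registers
/// rarely pay for themselves.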
void LSRInstance::NarrowSearchSpaceByFilterPostInc() {
  if (AMK != TTI::AMK_PostIndexed)
    return;
  if (EstimateSearchSpaceComplexity() < ComplexityLimit)
    return;

  LLVM_DEBUG(dbgs() << "The search space is too complex.\n"
                       "Narrowing the search space by choosing the lowest "
                       "register Formula for PostInc Uses.\n");

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];

    if (LU.Kind != LSRUse::Address)
      continue;
    if (!TTI.isIndexedLoadLegal(TTI.MIM_PostInc, LU.AccessTy.getType()) &&
        !TTI.isIndexedStoreLegal(TTI.MIM_PostInc, LU.AccessTy.getType()))
      continue;

    size_t MinRegs = std::numeric_limits<size_t>::max();
    for (const Formula &F : LU.Formulae)
      MinRegs = std::min(F.getNumRegs(), MinRegs);

    bool Any = false;
    for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms;
         ++FIdx) {
      Formula &F = LU.Formulae[FIdx];
      if (F.getNumRegs() > MinRegs) {
        LLVM_DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
                   dbgs() << "\n");
        LU.DeleteFormula(F);
        --FIdx;
        --NumForms;
        Any = true;
      }
    }
    if (Any)
      LU.RecomputeRegs(LUIdx, RegUses);

    if (EstimateSearchSpaceComplexity() < ComplexityLimit)
      break;
  }

  LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
}

/// This function deletes formulas with a high expected register count.
/// Assuming we don't know the value of each formula (we have already deleted
/// all the inefficient ones), compute for each register the probability of
/// it not being selected.
/// For example:
/// Use1:
///  reg(a) + reg({0,+,1})
///  reg(a) + reg({-1,+,1}) + 1
///  reg({a,+,1})
/// Use2:
///  reg(b) + reg({0,+,1})
///  reg(b) + reg({-1,+,1}) + 1
///  reg({b,+,1})
/// Use3:
///  reg(c) + reg(b) + reg({0,+,1})
///  reg(c) + reg({b,+,1})
///
/// Probability of not selecting
///                 Use1   Use2    Use3
/// reg(a)         (1/3) *   1   *   1
/// reg(b)           1   * (1/3) * (1/2)
/// reg({0,+,1})   (2/3) * (2/3) * (1/2)
/// reg({-1,+,1})  (2/3) * (2/3) *   1
/// reg({a,+,1})   (2/3) *   1   *   1
/// reg({b,+,1})     1   * (2/3) * (2/3)
/// reg(c)           1   *   1   *   0
///
/// Now count the mathematical expectation of the register count for each
/// formula. Note that for each use we exclude the probability of not
/// selecting for that use. For example, for Use1 the probability for reg(a)
/// would be just 1 * 1 (excluding the probability 1/3 of not selecting for
/// Use1).
/// Use1:
///  reg(a) + reg({0,+,1})          1 + 1/3       -- to be deleted
///  reg(a) + reg({-1,+,1}) + 1     1 + 4/9       -- to be deleted
///  reg({a,+,1})                   1
/// Use2:
///  reg(b) + reg({0,+,1})          1/2 + 1/3     -- to be deleted
///  reg(b) + reg({-1,+,1}) + 1     1/2 + 2/3     -- to be deleted
///  reg({b,+,1})                   2/3
/// Use3:
///  reg(c) + reg(b) + reg({0,+,1}) 1 + 1/3 + 4/9 -- to be deleted
///  reg(c) + reg({b,+,1})          1 + 2/3
void LSRInstance::NarrowSearchSpaceByDeletingCostlyFormulas() {
  if (EstimateSearchSpaceComplexity() < ComplexityLimit)
    return;
  // Ok, we have too many formulae on our hands to conveniently handle.
  // Use a rough heuristic to thin out the list.

  // Set of Regs which will be 100% used in the final solution, i.e. used in
  // every formula of a solution (in the example above this is reg(c)). We
  // can skip them in the calculations.
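  // To restate the example from the comment above: reg(c) appears in every
  // formula of Use3, so its not-selected probability is 0 and it lands in
  // UniqRegs below, while reg(a) appears in 2 of Use1's 3 formulae, so Use1
  // alone leaves it unselected with probability 1/3.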
  SmallPtrSet<const SCEV *, 4> UniqRegs;
  LLVM_DEBUG(dbgs() << "The search space is too complex.\n");

  // Map each register to the probability of it not being selected.
  DenseMap<const SCEV *, float> RegNumMap;
  for (const SCEV *Reg : RegUses) {
    if (UniqRegs.count(Reg))
      continue;
    float PNotSel = 1;
    for (const LSRUse &LU : Uses) {
      if (!LU.Regs.count(Reg))
        continue;
      float P = LU.getNotSelectedProbability(Reg);
      if (P != 0.0)
        PNotSel *= P;
      else
        UniqRegs.insert(Reg);
    }
    RegNumMap.insert(std::make_pair(Reg, PNotSel));
  }

  LLVM_DEBUG(
      dbgs() << "Narrowing the search space by deleting costly formulas\n");

  // Delete formulas where the expected register count is high.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    // If there is nothing to delete, continue.
    if (LU.Formulae.size() < 2)
      continue;
    // This is a temporary solution to test performance. Float should be
    // replaced with a rounding-independent type (based on integers) to avoid
    // different results for different target builds.
    float FMinRegNum = LU.Formulae[0].getNumRegs();
    float FMinARegNum = LU.Formulae[0].getNumRegs();
    size_t MinIdx = 0;
    for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
      Formula &F = LU.Formulae[i];
      float FRegNum = 0;
      float FARegNum = 0;
      for (const SCEV *BaseReg : F.BaseRegs) {
        if (UniqRegs.count(BaseReg))
          continue;
        FRegNum += RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg);
        if (isa<SCEVAddRecExpr>(BaseReg))
          FARegNum +=
              RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg);
      }
      if (const SCEV *ScaledReg = F.ScaledReg) {
        if (!UniqRegs.count(ScaledReg)) {
          FRegNum +=
              RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg);
          if (isa<SCEVAddRecExpr>(ScaledReg))
            FARegNum +=
                RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg);
        }
      }
      if (FMinRegNum > FRegNum ||
          (FMinRegNum == FRegNum && FMinARegNum > FARegNum)) {
        FMinRegNum = FRegNum;
        FMinARegNum = FARegNum;
        MinIdx = i;
      }
    }
    LLVM_DEBUG(dbgs() << "  The formula "; LU.Formulae[MinIdx].print(dbgs());
               dbgs() << " with min reg num " << FMinRegNum << '\n');
    if (MinIdx != 0)
      std::swap(LU.Formulae[MinIdx], LU.Formulae[0]);
    while (LU.Formulae.size() != 1) {
      LLVM_DEBUG(dbgs() << "  Deleting "; LU.Formulae.back().print(dbgs());
                 dbgs() << '\n');
      LU.Formulae.pop_back();
    }
    LU.RecomputeRegs(LUIdx, RegUses);
    assert(LU.Formulae.size() == 1 && "Should be exactly 1 min regs formula");
    Formula &F = LU.Formulae[0];
    LLVM_DEBUG(dbgs() << "  Leaving only "; F.print(dbgs()); dbgs() << '\n');
    // When we choose the formula, the regs become unique.
    UniqRegs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
    if (F.ScaledReg)
      UniqRegs.insert(F.ScaledReg);
  }
  LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
}

/// Pick a register which seems likely to be profitable, and then in any use
/// which has any reference to that register, delete all formulae which do not
/// reference that register.
void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
  // With all other options exhausted, loop until the system is simple
  // enough to handle.
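  // Hypothetical illustration (not from a real test case): if reg({0,+,1})
  // is referenced by 8 of 10 uses and no other register by more, it is
  // chosen as Best below; each of those 8 uses then drops every formula that
  // does not reference it, which shrinks the search space drastically at the
  // risk of missing a better overall solution.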
  SmallPtrSet<const SCEV *, 4> Taken;
  while (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    // Ok, we have too many formulae on our hands to conveniently handle.
    // Use a rough heuristic to thin out the list.
    LLVM_DEBUG(dbgs() << "The search space is too complex.\n");

    // Pick the register which is used by the most LSRUses, which is likely
    // to be a good reuse register candidate.
    const SCEV *Best = nullptr;
    unsigned BestNum = 0;
    for (const SCEV *Reg : RegUses) {
      if (Taken.count(Reg))
        continue;
      if (!Best) {
        Best = Reg;
        BestNum = RegUses.getUsedByIndices(Reg).count();
      } else {
        unsigned Count = RegUses.getUsedByIndices(Reg).count();
        if (Count > BestNum) {
          Best = Reg;
          BestNum = Count;
        }
      }
    }
    assert(Best && "Failed to find best LSRUse candidate");

    LLVM_DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
                      << " will yield profitable reuse.\n");
    Taken.insert(Best);

    // In any use with formulae which reference this register, delete formulae
    // which don't reference it.
    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      if (!LU.Regs.count(Best)) continue;

      bool Any = false;
      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        if (!F.referencesReg(Best)) {
          LLVM_DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
          LU.DeleteFormula(F);
          --e;
          --i;
          Any = true;
          assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?");
          continue;
        }
      }

      if (Any)
        LU.RecomputeRegs(LUIdx, RegUses);
    }

    LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
  }
}

/// If there are an extraordinary number of formulae to choose from, use some
/// rough heuristics to prune down the number of formulae. This keeps the main
/// solver from taking an extraordinary amount of time in some worst-case
/// scenarios.
void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
  NarrowSearchSpaceByDetectingSupersets();
  NarrowSearchSpaceByCollapsingUnrolledCode();
  NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  if (FilterSameScaledReg)
    NarrowSearchSpaceByFilterFormulaWithSameScaledReg();
  NarrowSearchSpaceByFilterPostInc();
  if (LSRExpNarrow)
    NarrowSearchSpaceByDeletingCostlyFormulas();
  else
    NarrowSearchSpaceByPickingWinnerRegs();
}

/// This is the recursive solver.
void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                               Cost &SolutionCost,
                               SmallVectorImpl<const Formula *> &Workspace,
                               const Cost &CurCost,
                               const SmallPtrSet<const SCEV *, 16> &CurRegs,
                               DenseSet<const SCEV *> &VisitedRegs) const {
  // Some ideas:
  //  - prune more:
  //    - use more aggressive filtering
  //    - sort the formulae so that the most profitable solutions are found
  //      first
  //    - sort the uses too
  //  - search faster:
  //    - don't compute a cost, and then compare. compare while computing a
  //      cost and bail early.
  //    - track register sets with SmallBitVector

  const LSRUse &LU = Uses[Workspace.size()];

  // If this use references any register that's already a part of the
  // in-progress solution, consider it a requirement that a formula must
  // reference that register in order to be considered. This prunes out
  // unprofitable searching.
  SmallSetVector<const SCEV *, 4> ReqRegs;
  for (const SCEV *S : CurRegs)
    if (LU.Regs.count(S))
      ReqRegs.insert(S);

  SmallPtrSet<const SCEV *, 16> NewRegs;
  Cost NewCost(L, SE, TTI, AMK);
  for (const Formula &F : LU.Formulae) {
    // Ignore formulae which may not be ideal in terms of register reuse of
    // ReqRegs. The formula should use all required registers before
    // introducing new ones.
    // This can sometimes (notably when trying to favour postinc) lead to
    // sub-optimal decisions; in those cases it is best left to the cost
    // modeling to get right.
    if (AMK != TTI::AMK_PostIndexed || LU.Kind != LSRUse::Address) {
      int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size());
      for (const SCEV *Reg : ReqRegs) {
        if ((F.ScaledReg && F.ScaledReg == Reg) ||
            is_contained(F.BaseRegs, Reg)) {
          --NumReqRegsToFind;
          if (NumReqRegsToFind == 0)
            break;
        }
      }
      if (NumReqRegsToFind != 0) {
        // If none of the formulae satisfied the required registers, then we
        // could clear ReqRegs and try again. Currently, we simply give up in
        // this case.
        continue;
      }
    }

    // Evaluate the cost of the current formula. If it's already worse than
    // the current best, prune the search at that point.
    NewCost = CurCost;
    NewRegs = CurRegs;
    NewCost.RateFormula(F, NewRegs, VisitedRegs, LU);
    if (NewCost.isLess(SolutionCost)) {
      Workspace.push_back(&F);
      if (Workspace.size() != Uses.size()) {
        SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
                     NewRegs, VisitedRegs);
        if (F.getNumRegs() == 1 && Workspace.size() == 1)
          VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
      } else {
        LLVM_DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
                   dbgs() << ".\nRegs:\n";
                   for (const SCEV *S : NewRegs) dbgs()
                      << "- " << *S << "\n";
                   dbgs() << '\n');

        SolutionCost = NewCost;
        Solution = Workspace;
      }
      Workspace.pop_back();
    }
  }
}

/// Choose one formula from each use. Return the results in the given Solution
/// vector.
void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
  SmallVector<const Formula *, 8> Workspace;
  Cost SolutionCost(L, SE, TTI, AMK);
  SolutionCost.Lose();
  Cost CurCost(L, SE, TTI, AMK);
  SmallPtrSet<const SCEV *, 16> CurRegs;
  DenseSet<const SCEV *> VisitedRegs;
  Workspace.reserve(Uses.size());

  // SolveRecurse does all the work.
  SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
               CurRegs, VisitedRegs);
  if (Solution.empty()) {
    LLVM_DEBUG(dbgs() << "\nNo Satisfactory Solution\n");
    return;
  }

  // Ok, we've now made all our decisions.
  LLVM_DEBUG(dbgs() << "\n"
                       "The chosen solution requires ";
             SolutionCost.print(dbgs()); dbgs() << ":\n";
             for (size_t i = 0, e = Uses.size(); i != e; ++i) {
               dbgs() << "  ";
               Uses[i].print(dbgs());
               dbgs() << "\n"
                         "    ";
               Solution[i]->print(dbgs());
               dbgs() << '\n';
             });

  assert(Solution.size() == Uses.size() && "Malformed solution!");
}

/// Helper for AdjustInsertPositionForExpand. Climb up the dominator tree as
/// far as we can go while still being dominated by the input positions. This
/// helps canonicalize the insert position, which encourages sharing.
BasicBlock::iterator
LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
                                 const SmallVectorImpl<Instruction *> &Inputs)
    const {
  Instruction *Tentative = &*IP;
  while (true) {
    bool AllDominate = true;
    Instruction *BetterPos = nullptr;
    // Don't bother attempting to insert before a catchswitch; its basic block
    // cannot have other non-PHI instructions.
    if (isa<CatchSwitchInst>(Tentative))
      return IP;

    for (Instruction *Inst : Inputs) {
      if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
        AllDominate = false;
        break;
      }
      // Attempt to find an insert position in the middle of the block,
      // instead of at the end, so that it can be used for other expansions.
      if (Tentative->getParent() == Inst->getParent() &&
          (!BetterPos || !DT.dominates(Inst, BetterPos)))
        BetterPos = &*std::next(BasicBlock::iterator(Inst));
    }
    if (!AllDominate)
      break;
    if (BetterPos)
      IP = BetterPos->getIterator();
    else
      IP = Tentative->getIterator();

    const Loop *IPLoop = LI.getLoopFor(IP->getParent());
    unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0;

    BasicBlock *IDom;
    for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) {
      if (!Rung) return IP;
      Rung = Rung->getIDom();
      if (!Rung) return IP;
      IDom = Rung->getBlock();

      // Don't climb into a loop though.
      const Loop *IDomLoop = LI.getLoopFor(IDom);
      unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0;
      if (IDomDepth <= IPLoopDepth &&
          (IDomDepth != IPLoopDepth || IDomLoop == IPLoop))
        break;
    }

    Tentative = IDom->getTerminator();
  }

  return IP;
}

/// Determine an input position which will be dominated by the operands and
/// which will dominate the result.
BasicBlock::iterator
LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
                                           const LSRFixup &LF,
                                           const LSRUse &LU,
                                           SCEVExpander &Rewriter) const {
  // Collect some instructions which must be dominated by the
  // expanding replacement. These must be dominated by any operands that
  // will be required in the expansion.
  SmallVector<Instruction *, 4> Inputs;
  if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
    Inputs.push_back(I);
  if (LU.Kind == LSRUse::ICmpZero)
    if (Instruction *I =
          dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
      Inputs.push_back(I);
  if (LF.PostIncLoops.count(L)) {
    if (LF.isUseFullyOutsideLoop(L))
      Inputs.push_back(L->getLoopLatch()->getTerminator());
    else
      Inputs.push_back(IVIncInsertPos);
  }
  // The expansion must also be dominated by the increment positions of any
  // loops for which it is using post-inc mode.
  for (const Loop *PIL : LF.PostIncLoops) {
    if (PIL == L) continue;

    // Be dominated by the loop exit.
    SmallVector<BasicBlock *, 4> ExitingBlocks;
    PIL->getExitingBlocks(ExitingBlocks);
    if (!ExitingBlocks.empty()) {
      BasicBlock *BB = ExitingBlocks[0];
      for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i)
        BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]);
      Inputs.push_back(BB->getTerminator());
    }
  }

  assert(!isa<PHINode>(LowestIP) && !LowestIP->isEHPad()
         && !isa<DbgInfoIntrinsic>(LowestIP) &&
         "Insertion point must be a normal instruction");

  // Then, climb up the immediate dominator tree as far as we can go while
  // still being dominated by the input positions.
  BasicBlock::iterator IP = HoistInsertPosition(LowestIP, Inputs);

  // Don't insert instructions before PHI nodes.
  while (isa<PHINode>(IP)) ++IP;

  // Ignore landingpad instructions.
  while (IP->isEHPad()) ++IP;

  // Ignore debug intrinsics.
  while (isa<DbgInfoIntrinsic>(IP)) ++IP;

  // Set IP below instructions recently inserted by SCEVExpander. This keeps
  // the IP consistent across expansions and allows the previously inserted
  // instructions to be reused by subsequent expansion.
  while (Rewriter.isInsertedInstruction(&*IP) && IP != LowestIP)
    ++IP;

  return IP;
}

/// Emit instructions for the leading candidate expression for this LSRUse
/// (this is called "expanding").
Value *LSRInstance::Expand(const LSRUse &LU, const LSRFixup &LF,
                           const Formula &F, BasicBlock::iterator IP,
                           SCEVExpander &Rewriter,
                           SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
  if (LU.RigidFormula)
    return LF.OperandValToReplace;

  // Determine an input position which will be dominated by the operands and
  // which will dominate the result.
  IP = AdjustInsertPositionForExpand(IP, LF, LU, Rewriter);
  Rewriter.setInsertPoint(&*IP);

  // Inform the Rewriter if we have a post-increment use, so that it can
  // perform an advantageous expansion.
  Rewriter.setPostInc(LF.PostIncLoops);

  // This is the type that the user actually needs.
  Type *OpTy = LF.OperandValToReplace->getType();
  // This will be the type that we'll initially expand to.
  Type *Ty = F.getType();
  if (!Ty)
    // No type known; just expand directly to the ultimate type.
    Ty = OpTy;
  else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy))
    // Expand directly to the ultimate type if it's the right size.
    Ty = OpTy;
  // This is the type to do integer arithmetic in.
  Type *IntTy = SE.getEffectiveSCEVType(Ty);

  // Build up a list of operands to add together to form the full base.
  SmallVector<const SCEV *, 8> Ops;

  // Expand the BaseRegs portion.
  for (const SCEV *Reg : F.BaseRegs) {
    assert(!Reg->isZero() && "Zero allocated in a base register!");

    // If we're expanding for a post-inc user, make the post-inc adjustment.
    Reg = denormalizeForPostIncUse(Reg, LF.PostIncLoops, SE);
    Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, nullptr)));
  }

  // Expand the ScaledReg portion.
  Value *ICmpScaledV = nullptr;
  if (F.Scale != 0) {
    const SCEV *ScaledS = F.ScaledReg;

    // If we're expanding for a post-inc user, make the post-inc adjustment.
    PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
    ScaledS = denormalizeForPostIncUse(ScaledS, Loops, SE);

    if (LU.Kind == LSRUse::ICmpZero) {
      // Expand ScaledReg as if it were part of the base regs.
      if (F.Scale == 1)
        Ops.push_back(
            SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr)));
      else {
        // An interesting way of "folding" with an icmp is to use a negated
        // scale, which we'll implement by inserting it into the other operand
        // of the icmp.
        assert(F.Scale == -1 &&
               "The only scale supported by ICmpZero uses is -1!");
        ICmpScaledV = Rewriter.expandCodeFor(ScaledS, nullptr);
      }
    } else {
      // Otherwise just expand the scaled register and an explicit scale,
      // which is expected to be matched as part of the address.

      // Flush the operand list to suppress SCEVExpander hoisting address
      // modes, unless the addressing mode will not be folded.
      if (!Ops.empty() && LU.Kind == LSRUse::Address &&
          isAMCompletelyFolded(TTI, LU, F)) {
        Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), nullptr);
        Ops.clear();
        Ops.push_back(SE.getUnknown(FullV));
      }
      ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr));
      if (F.Scale != 1)
        ScaledS =
            SE.getMulExpr(ScaledS, SE.getConstant(ScaledS->getType(), F.Scale));
      Ops.push_back(ScaledS);
    }
  }

  // Expand the GV portion.
  if (F.BaseGV) {
    // Flush the operand list to suppress SCEVExpander hoisting.
    if (!Ops.empty()) {
      Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty);
      Ops.clear();
      Ops.push_back(SE.getUnknown(FullV));
    }
    Ops.push_back(SE.getUnknown(F.BaseGV));
  }

  // Flush the operand list to suppress SCEVExpander hoisting of both folded
  // and unfolded offsets. LSR assumes they both live next to their uses.
  if (!Ops.empty()) {
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }

  // Expand the immediate portion.
  int64_t Offset = (uint64_t)F.BaseOffset + LF.Offset;
  if (Offset != 0) {
    if (LU.Kind == LSRUse::ICmpZero) {
      // The other interesting way of "folding" with an ICmpZero is to use a
      // negated immediate.
      if (!ICmpScaledV)
        ICmpScaledV = ConstantInt::get(IntTy, -(uint64_t)Offset);
      else {
        Ops.push_back(SE.getUnknown(ICmpScaledV));
        ICmpScaledV = ConstantInt::get(IntTy, Offset);
      }
    } else {
      // Just add the immediate values. These again are expected to be matched
      // as part of the address.
      Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset)));
    }
  }

  // Expand the unfolded offset portion.
  int64_t UnfoldedOffset = F.UnfoldedOffset;
  if (UnfoldedOffset != 0) {
    // Just add the immediate values.
    Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy,
                                                       UnfoldedOffset)));
  }

  // Emit instructions summing all the operands.
  const SCEV *FullS = Ops.empty() ?
                      SE.getConstant(IntTy, 0) :
                      SE.getAddExpr(Ops);
  Value *FullV = Rewriter.expandCodeFor(FullS, Ty);

  // We're done expanding now, so reset the rewriter.
  Rewriter.clearPostInc();

  // An ICmpZero Formula represents an ICmp which we're handling as a
  // comparison against zero. Now that we've expanded an expression for that
  // form, update the ICmp's other operand.
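  // A sketch of the idea with made-up IR: an exit test such as
  //   %c = icmp eq i64 %lsr.iv.next, %n
  // is, roughly speaking, treated as comparing (%lsr.iv.next - %n) against
  // zero, so a formula's negated immediate or negated scaled register can be
  // folded into the icmp's second operand below instead of being added into
  // the expansion itself.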
  if (LU.Kind == LSRUse::ICmpZero) {
    ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
    if (auto *OperandIsInstr = dyn_cast<Instruction>(CI->getOperand(1)))
      DeadInsts.emplace_back(OperandIsInstr);
    assert(!F.BaseGV && "ICmp does not support folding a global value and "
                        "a scale at the same time!");
    if (F.Scale == -1) {
      if (ICmpScaledV->getType() != OpTy) {
        Instruction *Cast =
          CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false,
                                                   OpTy, false),
                           ICmpScaledV, OpTy, "tmp", CI);
        ICmpScaledV = Cast;
      }
      CI->setOperand(1, ICmpScaledV);
    } else {
      // A scale of 1 means that the scale has been expanded as part of the
      // base regs.
      assert((F.Scale == 0 || F.Scale == 1) &&
             "ICmp does not support folding a global value and "
             "a scale at the same time!");
      Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
                                           -(uint64_t)Offset);
      if (C->getType() != OpTy)
        C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                          OpTy, false),
                                  C, OpTy);

      CI->setOperand(1, C);
    }
  }

  return FullV;
}

/// Helper for Rewrite. PHI nodes are special because the use of their operands
/// effectively happens in their predecessor blocks, so the expression may need
/// to be expanded in multiple places.
void LSRInstance::RewriteForPHI(
    PHINode *PN, const LSRUse &LU, const LSRFixup &LF, const Formula &F,
    SCEVExpander &Rewriter, SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
  DenseMap<BasicBlock *, Value *> Inserted;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
      bool needUpdateFixups = false;
      BasicBlock *BB = PN->getIncomingBlock(i);

      // If this is a critical edge, split the edge so that we do not insert
      // the code on all predecessor/successor paths. We do this unless this
      // is the canonical backedge for this loop, which complicates post-inc
      // users.
      if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
          !isa<IndirectBrInst>(BB->getTerminator()) &&
          !isa<CatchSwitchInst>(BB->getTerminator())) {
        BasicBlock *Parent = PN->getParent();
        Loop *PNLoop = LI.getLoopFor(Parent);
        if (!PNLoop || Parent != PNLoop->getHeader()) {
          // Split the critical edge.
          BasicBlock *NewBB = nullptr;
          if (!Parent->isLandingPad()) {
            NewBB =
              SplitCriticalEdge(BB, Parent,
                                CriticalEdgeSplittingOptions(&DT, &LI, MSSAU)
                                    .setMergeIdenticalEdges()
                                    .setKeepOneInputPHIs());
          } else {
            SmallVector<BasicBlock*, 2> NewBBs;
            SplitLandingPadPredecessors(Parent, BB, "", "", NewBBs, &DT, &LI);
            NewBB = NewBBs[0];
          }
          // If NewBB==NULL, then SplitCriticalEdge refused to split because
          // all phi predecessors are identical. The simple thing to do is
          // skip splitting in this case rather than complicate the API.
          if (NewBB) {
            // If PN is outside of the loop and BB is in the loop, we want to
            // move the block to be immediately before the PHI block, not
            // immediately after BB.
            if (L->contains(BB) && !L->contains(PN))
              NewBB->moveBefore(PN->getParent());

            // Splitting the edge can reduce the number of PHI entries we have.
            e = PN->getNumIncomingValues();
            BB = NewBB;
            i = PN->getBasicBlockIndex(BB);

            needUpdateFixups = true;
          }
        }
      }

      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
        Inserted.insert(std::make_pair(BB, static_cast<Value *>(nullptr)));
      if (!Pair.second)
        PN->setIncomingValue(i, Pair.first->second);
      else {
        Value *FullV = Expand(LU, LF, F, BB->getTerminator()->getIterator(),
                              Rewriter, DeadInsts);

        // If this is reuse-by-noop-cast, insert the noop cast.
        Type *OpTy = LF.OperandValToReplace->getType();
        if (FullV->getType() != OpTy)
          FullV =
            CastInst::Create(CastInst::getCastOpcode(FullV, false,
                                                     OpTy, false),
                             FullV, LF.OperandValToReplace->getType(),
                             "tmp", BB->getTerminator());

        PN->setIncomingValue(i, FullV);
        Pair.first->second = FullV;
      }

      // If LSR splits a critical edge and the phi node has other pending
      // fixup operands, we need to update those pending fixups. Otherwise
      // formulae will not be implemented completely and some instructions
      // will not be eliminated.
      if (needUpdateFixups) {
        for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses;
             ++LUIdx)
          for (LSRFixup &Fixup : Uses[LUIdx].Fixups)
            // If the fixup is supposed to rewrite some operand in the phi
            // that was just updated, it may already have been moved to
            // another phi node. Such a fixup requires an update.
            if (Fixup.UserInst == PN) {
              // Check if the operand we try to replace still exists in the
              // original phi.
              bool foundInOriginalPHI = false;
              for (const auto &val : PN->incoming_values())
                if (val == Fixup.OperandValToReplace) {
                  foundInOriginalPHI = true;
                  break;
                }

              // If the fixup operand is found in the original PHI, there is
              // nothing to do.
              if (foundInOriginalPHI)
                continue;

              // Otherwise it might have been moved to another PHI and require
              // an update. If the fixup operand is not found in any of the
              // incoming blocks, that means we have already rewritten it, so
              // again there is nothing to do.
              for (const auto &Block : PN->blocks())
                for (BasicBlock::iterator I = Block->begin(); isa<PHINode>(I);
                     ++I) {
                  PHINode *NewPN = cast<PHINode>(I);
                  for (const auto &val : NewPN->incoming_values())
                    if (val == Fixup.OperandValToReplace)
                      Fixup.UserInst = NewPN;
                }
            }
      }
    }
}

/// Emit instructions for the leading candidate expression for this LSRUse
/// (this is called "expanding"), and update the UserInst to reference the
/// newly expanded value.
void LSRInstance::Rewrite(const LSRUse &LU, const LSRFixup &LF,
                          const Formula &F, SCEVExpander &Rewriter,
                          SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
  // First, find an insertion point that dominates UserInst. For PHI nodes,
  // find the nearest block which dominates all the relevant uses.
  if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
    RewriteForPHI(PN, LU, LF, F, Rewriter, DeadInsts);
  } else {
    Value *FullV =
      Expand(LU, LF, F, LF.UserInst->getIterator(), Rewriter, DeadInsts);

    // If this is reuse-by-noop-cast, insert the noop cast.
    Type *OpTy = LF.OperandValToReplace->getType();
    if (FullV->getType() != OpTy) {
      Instruction *Cast =
        CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
                         FullV, OpTy, "tmp", LF.UserInst);
      FullV = Cast;
    }

    // Update the user. ICmpZero is handled specially here (for now) because
    // Expand may have updated one of the operands of the icmp already, and
    // its new value may happen to be equal to LF.OperandValToReplace, in
    // which case doing replaceUsesOfWith leads to replacing both operands
    // with the same value. TODO: Reorganize this.
    if (LU.Kind == LSRUse::ICmpZero)
      LF.UserInst->setOperand(0, FullV);
    else
      LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
  }

  if (auto *OperandIsInstr = dyn_cast<Instruction>(LF.OperandValToReplace))
    DeadInsts.emplace_back(OperandIsInstr);
}

/// Rewrite all the fixup locations with new values, following the chosen
/// solution.
void LSRInstance::ImplementSolution(
    const SmallVectorImpl<const Formula *> &Solution) {
  // Keep track of instructions we may have made dead, so that
  // we can remove them after we are done working.
  SmallVector<WeakTrackingVH, 16> DeadInsts;

  SCEVExpander Rewriter(SE, L->getHeader()->getModule()->getDataLayout(), "lsr",
                        false);
#ifndef NDEBUG
  Rewriter.setDebugType(DEBUG_TYPE);
#endif
  Rewriter.disableCanonicalMode();
  Rewriter.enableLSRMode();
  Rewriter.setIVIncInsertPos(L, IVIncInsertPos);

  // Mark phi nodes that terminate chains so the expander tries to reuse them.
  for (const IVChain &Chain : IVChainVec) {
    if (PHINode *PN = dyn_cast<PHINode>(Chain.tailUserInst()))
      Rewriter.setChainedPhi(PN);
  }

  // Expand the new value definitions and update the users.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx)
    for (const LSRFixup &Fixup : Uses[LUIdx].Fixups) {
      Rewrite(Uses[LUIdx], Fixup, *Solution[LUIdx], Rewriter, DeadInsts);
      Changed = true;
    }

  for (const IVChain &Chain : IVChainVec) {
    GenerateIVChain(Chain, Rewriter, DeadInsts);
    Changed = true;
  }
  // Clean up after ourselves. This must be done before deleting any
  // instructions.
  Rewriter.clear();

  Changed |= RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts,
                                                                  &TLI, MSSAU);

  // In our cost analysis above, we assume that each addrec consumes exactly
  // one register, and arrange to have increments inserted just before the
  // latch to maximize the chance this is true. However, if we reused
  // existing IVs, we now need to move the increments to match our
  // expectations. Otherwise, our cost modeling results in us having chosen
  // a non-optimal result for the actual schedule. (And yes, this scheduling
  // decision does impact later codegen.)
  for (PHINode &PN : L->getHeader()->phis()) {
    BinaryOperator *BO = nullptr;
    Value *Start = nullptr, *Step = nullptr;
    if (!matchSimpleRecurrence(&PN, BO, Start, Step))
      continue;

    switch (BO->getOpcode()) {
    case Instruction::Sub:
      if (BO->getOperand(0) != &PN)
        // sub is non-commutative - match handling elsewhere in LSR
        continue;
      break;
    case Instruction::Add:
      break;
    default:
      continue;
    }

    if (!isa<Constant>(Step))
      // If not a constant step, might increase register pressure
      // (We assume constants have been canonicalized to RHS)
      continue;

    if (BO->getParent() == IVIncInsertPos->getParent())
      // Only bother moving across blocks. Isel can handle block local case.
      continue;

    // Can we legally schedule inc at the desired point?
    if (!llvm::all_of(BO->uses(),
                      [&](Use &U) { return DT.dominates(IVIncInsertPos, U); }))
      continue;
    BO->moveBefore(IVIncInsertPos);
    Changed = true;
  }
}

LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE,
                         DominatorTree &DT, LoopInfo &LI,
                         const TargetTransformInfo &TTI, AssumptionCache &AC,
                         TargetLibraryInfo &TLI, MemorySSAUpdater *MSSAU)
    : IU(IU), SE(SE), DT(DT), LI(LI), AC(AC), TLI(TLI), TTI(TTI), L(L),
      MSSAU(MSSAU), AMK(PreferredAddresingMode.getNumOccurrences() > 0
                            ? PreferredAddresingMode
                            : TTI.getPreferredAddressingMode(L, &SE)) {
  // If LoopSimplify form is not available, stay out of trouble.
  if (!L->isLoopSimplifyForm())
    return;

  // If there's no interesting work to be done, bail early.
  if (IU.empty()) return;

  // If there's too much analysis to be done, bail early. We won't be able to
  // model the problem anyway.
  unsigned NumUsers = 0;
  for (const IVStrideUse &U : IU) {
    if (++NumUsers > MaxIVUsers) {
      (void)U;
      LLVM_DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << U
                        << "\n");
      return;
    }
    // Bail out if we have a PHI on an EHPad that gets a value from a
    // CatchSwitchInst. Because the CatchSwitchInst cannot be split, there is
    // no good place to stick any instructions.
    if (auto *PN = dyn_cast<PHINode>(U.getUser())) {
      auto *FirstNonPHI = PN->getParent()->getFirstNonPHI();
      if (isa<FuncletPadInst>(FirstNonPHI) ||
          isa<CatchSwitchInst>(FirstNonPHI))
        for (BasicBlock *PredBB : PN->blocks())
          if (isa<CatchSwitchInst>(PredBB->getFirstNonPHI()))
            return;
    }
  }

#ifndef NDEBUG
  // All dominating loops must have preheaders, or SCEVExpander may not be
  // able to materialize an AddRecExpr whose Start is an outer AddRecExpr.
  //
  // IVUsers analysis should only create users that are dominated by simple
  // loop headers. Since this loop should dominate all of its users, its user
  // list should be empty if this loop itself is not within a simple loop
  // nest.
  for (DomTreeNode *Rung = DT.getNode(L->getLoopPreheader());
       Rung; Rung = Rung->getIDom()) {
    BasicBlock *BB = Rung->getBlock();
    const Loop *DomLoop = LI.getLoopFor(BB);
    if (DomLoop && DomLoop->getHeader() == BB) {
      assert(DomLoop->getLoopPreheader() && "LSR needs a simplified loop nest");
    }
  }
#endif // NDEBUG

  LLVM_DEBUG(dbgs() << "\nLSR on loop ";
             L->getHeader()->printAsOperand(dbgs(), /*PrintType=*/false);
             dbgs() << ":\n");

  // First, perform some low-level loop optimizations.
  OptimizeShadowIV();
  OptimizeLoopTermCond();

  // If loop preparation eliminates all interesting IV users, bail.
  if (IU.empty()) return;

  // Skip nested loops until we can model them better with formulae.
  if (!L->isInnermost()) {
    LLVM_DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n");
    return;
  }

  // Start collecting data and preparing for the solver.
  // If the number of registers is not the major cost, we cannot benefit from
  // the current profitable chain optimization, which is based on the number
  // of registers.
  // FIXME: add profitable chain optimization for other kinds of major cost,
  // for example the number of instructions.
  if (TTI.isNumRegsMajorCostOfLSR() || StressIVChain)
    CollectChains();
  CollectInterestingTypesAndFactors();
  CollectFixupsAndInitialFormulae();
  CollectLoopInvariantFixupsAndFormulae();

  if (Uses.empty())
    return;

  LLVM_DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
             print_uses(dbgs()));

  // Now use the reuse data to generate a bunch of interesting ways
  // to formulate the values needed for the uses.
  GenerateAllReuseFormulae();

  FilterOutUndesirableDedicatedRegisters();
  NarrowSearchSpaceUsingHeuristics();

  SmallVector<const Formula *, 8> Solution;
  Solve(Solution);

  // Release memory that is no longer needed.
  Factors.clear();
  Types.clear();
  RegUses.clear();

  if (Solution.empty())
    return;

#ifndef NDEBUG
  // Formulae should be legal.
  for (const LSRUse &LU : Uses) {
    for (const Formula &F : LU.Formulae)
      assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
                        F) && "Illegal formula generated!");
  }
#endif

  // Now that we've decided what we want, make it so.
  ImplementSolution(Solution);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
  if (Factors.empty() && Types.empty()) return;

  OS << "LSR has identified the following interesting factors and types: ";
  bool First = true;

  for (int64_t Factor : Factors) {
    if (!First) OS << ", ";
    First = false;
    OS << '*' << Factor;
  }

  for (Type *Ty : Types) {
    if (!First) OS << ", ";
    First = false;
    OS << '(' << *Ty << ')';
  }
  OS << '\n';
}

void LSRInstance::print_fixups(raw_ostream &OS) const {
  OS << "LSR is examining the following fixup sites:\n";
  for (const LSRUse &LU : Uses)
    for (const LSRFixup &LF : LU.Fixups) {
      dbgs() << "  ";
      LF.print(OS);
      OS << '\n';
    }
}

void LSRInstance::print_uses(raw_ostream &OS) const {
  OS << "LSR is examining the following uses:\n";
  for (const LSRUse &LU : Uses) {
    dbgs() << "  ";
    LU.print(OS);
    OS << '\n';
    for (const Formula &F : LU.Formulae) {
      OS << "    ";
      F.print(OS);
      OS << '\n';
    }
  }
}

void LSRInstance::print(raw_ostream &OS) const {
  print_factors_and_types(OS);
  print_fixups(OS);
  print_uses(OS);
}

LLVM_DUMP_METHOD void LSRInstance::dump() const {
  print(errs()); errs() << '\n';
}
#endif

namespace {

class LoopStrengthReduce : public LoopPass {
public:
  static char ID; // Pass ID, replacement for typeid

  LoopStrengthReduce();

private:
  bool runOnLoop(Loop *L, LPPassManager &LPM) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

} // end anonymous namespace

LoopStrengthReduce::LoopStrengthReduce() : LoopPass(ID) {
  initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry());
}

void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
  // We split critical edges, so we change the CFG. However, we do update
  // many analyses if they are around.
  AU.addPreservedID(LoopSimplifyID);

  AU.addRequired<LoopInfoWrapperPass>();
  AU.addPreserved<LoopInfoWrapperPass>();
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addRequired<ScalarEvolutionWrapperPass>();
  AU.addPreserved<ScalarEvolutionWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  // Requiring LoopSimplify a second time here prevents IVUsers from running
  // twice, since LoopSimplify was invalidated by running ScalarEvolution.
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<IVUsersWrapperPass>();
  AU.addPreserved<IVUsersWrapperPass>();
  AU.addRequired<TargetTransformInfoWrapperPass>();
  AU.addPreserved<MemorySSAWrapperPass>();
}

using EqualValues = SmallVector<std::tuple<WeakVH, int64_t>, 4>;
using EqualValuesMap =
    DenseMap<DbgValueInst *, SmallVector<std::pair<unsigned, EqualValues>>>;
using LocationMap =
    DenseMap<DbgValueInst *, std::pair<DIExpression *, Metadata *>>;

static void DbgGatherEqualValues(Loop *L, ScalarEvolution &SE,
                                 EqualValuesMap &DbgValueToEqualSet,
                                 LocationMap &DbgValueToLocation) {
  for (auto &B : L->getBlocks()) {
    for (auto &I : *B) {
      auto DVI = dyn_cast<DbgValueInst>(&I);
      if (!DVI)
        continue;
      for (unsigned Idx = 0; Idx < DVI->getNumVariableLocationOps(); ++Idx) {
        // TODO: We can duplicate results if the same arg appears more than
        // once.
        Value *V = DVI->getVariableLocationOp(Idx);
        if (!V || !SE.isSCEVable(V->getType()))
          continue;
        auto DbgValueSCEV = SE.getSCEV(V);
        EqualValues EqSet;
        for (PHINode &Phi : L->getHeader()->phis()) {
          if (V->getType() != Phi.getType())
            continue;
          if (!SE.isSCEVable(Phi.getType()))
            continue;
          auto PhiSCEV = SE.getSCEV(&Phi);
          Optional<APInt> Offset =
              SE.computeConstantDifference(DbgValueSCEV, PhiSCEV);
          if (Offset && Offset->getMinSignedBits() <= 64)
            EqSet.emplace_back(
                std::make_tuple(&Phi, Offset.getValue().getSExtValue()));
        }
        DbgValueToEqualSet[DVI].push_back({Idx, std::move(EqSet)});
        // If we fall back to using this raw location, at least one location
        // op must be dead. A DIArgList will automatically undef arguments
        // when they become unavailable, but a ValueAsMetadata will not; since
        // we know the value should be undef, we use the undef value directly
        // here.
        Metadata *RawLocation =
            DVI->hasArgList() ? DVI->getRawLocation()
                              : ValueAsMetadata::get(UndefValue::get(
                                    DVI->getVariableLocationOp(0)->getType()));
        DbgValueToLocation[DVI] = {DVI->getExpression(), RawLocation};
      }
    }
  }
}

static void DbgApplyEqualValues(EqualValuesMap &DbgValueToEqualSet,
                                LocationMap &DbgValueToLocation) {
  for (auto A : DbgValueToEqualSet) {
    auto *DVI = A.first;
    // Only update those that are now undef.
    if (!DVI->isUndef())
      continue;
    // The dbg.value may have had its value or expression changed during LSR
    // by a failed salvage attempt; refresh them from the map.
    auto *DbgDIExpr = DbgValueToLocation[DVI].first;
    DVI->setRawLocation(DbgValueToLocation[DVI].second);
    DVI->setExpression(DbgDIExpr);
    assert(DVI->isUndef() && "dbg.value with non-undef location should not "
                             "have been modified by LSR.");
    for (auto IdxEV : A.second) {
      unsigned Idx = IdxEV.first;
      for (auto EV : IdxEV.second) {
        auto EVHandle = std::get<WeakVH>(EV);
        if (!EVHandle)
          continue;
        int64_t Offset = std::get<int64_t>(EV);
        DVI->replaceVariableLocationOp(Idx, EVHandle);
        if (Offset) {
          SmallVector<uint64_t, 8> Ops;
          DIExpression::appendOffset(Ops, Offset);
          DbgDIExpr = DIExpression::appendOpsToArg(DbgDIExpr, Ops, Idx, true);
        }
        DVI->setExpression(DbgDIExpr);
        break;
      }
    }
  }
}

static bool ReduceLoopStrength(Loop *L, IVUsers &IU, ScalarEvolution &SE,
                               DominatorTree &DT, LoopInfo &LI,
                               const TargetTransformInfo &TTI,
                               AssumptionCache &AC, TargetLibraryInfo &TLI,
                               MemorySSA *MSSA) {

  bool Changed = false;
  std::unique_ptr<MemorySSAUpdater> MSSAU;
  if (MSSA)
    MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);

  // Run the main LSR transformation.
  Changed |=
      LSRInstance(L, IU, SE, DT, LI, TTI, AC, TLI, MSSAU.get()).getChanged();

  // Debug preservation - before we start removing anything, create
  // equivalence sets for the llvm.dbg.value intrinsics.
  EqualValuesMap DbgValueToEqualSet;
  LocationMap DbgValueToLocation;
  DbgGatherEqualValues(L, SE, DbgValueToEqualSet, DbgValueToLocation);

  // Remove any extra phis created by processing inner loops.
  Changed |= DeleteDeadPHIs(L->getHeader(), &TLI, MSSAU.get());
  if (EnablePhiElim && L->isLoopSimplifyForm()) {
    SmallVector<WeakTrackingVH, 16> DeadInsts;
    const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
    SCEVExpander Rewriter(SE, DL, "lsr", false);
#ifndef NDEBUG
    Rewriter.setDebugType(DEBUG_TYPE);
#endif
    unsigned numFolded = Rewriter.replaceCongruentIVs(L, &DT, DeadInsts, &TTI);
    if (numFolded) {
      Changed = true;
      RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts, &TLI,
                                                           MSSAU.get());
      DeleteDeadPHIs(L->getHeader(), &TLI, MSSAU.get());
    }
  }

  DbgApplyEqualValues(DbgValueToEqualSet, DbgValueToLocation);

  return Changed;
}

bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
  if (skipLoop(L))
    return false;

  auto &IU = getAnalysis<IVUsersWrapperPass>().getIU();
  auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  const auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
      *L->getHeader()->getParent());
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
      *L->getHeader()->getParent());
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
      *L->getHeader()->getParent());
  auto *MSSAAnalysis = getAnalysisIfAvailable<MemorySSAWrapperPass>();
  MemorySSA *MSSA = nullptr;
  if (MSSAAnalysis)
    MSSA = &MSSAAnalysis->getMSSA();
  return ReduceLoopStrength(L, IU, SE, DT, LI, TTI, AC, TLI, MSSA);
}

PreservedAnalyses LoopStrengthReducePass::run(Loop &L, LoopAnalysisManager &AM,
                                              LoopStandardAnalysisResults &AR,
                                              LPMUpdater &) {
  if (!ReduceLoopStrength(&L, AM.getResult<IVUsersAnalysis>(L, AR), AR.SE,
                          AR.DT, AR.LI, AR.TTI, AR.AC, AR.TLI, AR.MSSA))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

char LoopStrengthReduce::ID = 0;

INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
                      "Loop Strength Reduction", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(IVUsersWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
                    "Loop Strength Reduction", false, false)

Pass *llvm::createLoopStrengthReducePass() { return new LoopStrengthReduce(); }