//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops
// that have as one or more of their components the loop induction variable.
// It rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
// it's useful to think about these as the same register, with some uses using
// the value of the register before the add and some using it after. In this
// example, the icmp is a post-increment user, since it uses %i.next, which is
// the value of the induction variable after the increment. The other common
// case of post-increment users is users outside the loop.
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should the addressing mode BaseGV be changed to a ConstantExpr instead
//       of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
//       smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
//       multiple base registers, or as the increment expression in an addrec),
//       we may not actually need both reg and (-1 * reg) in registers; the
//       negation can be implemented by using a sub instead of an add. The
//       lack of support for taking this into consideration when making
//       register pressure decisions is partly worked around by the "Special"
//       use kind.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopStrengthReduce.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "loop-reduce"

/// MaxIVUsers is an arbitrary threshold that provides an early opportunity to
/// bail out. This threshold is far beyond the number of users that LSR can
/// conceivably solve, so it should not affect generated code, but catches the
/// worst cases before LSR burns too much compile time and stack space.
static const unsigned MaxIVUsers = 200;

// Temporary flag to clean up congruent phis after LSR phi expansion.
// It's currently disabled until we can determine whether it's truly useful or
// not. The flag should be removed after the v3.0 release.
// This is now needed for ivchains.
static cl::opt<bool> EnablePhiElim(
    "enable-lsr-phielim", cl::Hidden, cl::init(true),
    cl::desc("Enable LSR phi elimination"));
// The flag adds instruction count to the solution cost comparison.
static cl::opt<bool> InsnsCost(
    "lsr-insns-cost", cl::Hidden, cl::init(true),
    cl::desc("Add instruction count to an LSR cost model"));

// Flag to choose how to narrow a complex LSR solution.
static cl::opt<bool> LSRExpNarrow(
    "lsr-exp-narrow", cl::Hidden, cl::init(false),
    cl::desc("Narrow LSR complex solution using"
             " expected number of registers"));

// Flag to narrow search space by filtering non-optimal formulae with
// the same ScaledReg and Scale.
static cl::opt<bool> FilterSameScaledReg(
    "lsr-filter-same-scaled-reg", cl::Hidden, cl::init(true),
    cl::desc("Narrow LSR search space by filtering non-optimal formulae"
             " with the same ScaledReg and Scale"));

#ifndef NDEBUG
// Stress test IV chain generation.
static cl::opt<bool> StressIVChain(
    "stress-ivchain", cl::Hidden, cl::init(false),
    cl::desc("Stress test LSR IV chains"));
#else
static bool StressIVChain = false;
#endif

namespace {

struct MemAccessTy {
  /// Used in situations where the accessed memory type is unknown.
  static const unsigned UnknownAddressSpace =
      std::numeric_limits<unsigned>::max();

  Type *MemTy = nullptr;
  unsigned AddrSpace = UnknownAddressSpace;

  MemAccessTy() = default;
  MemAccessTy(Type *Ty, unsigned AS) : MemTy(Ty), AddrSpace(AS) {}

  bool operator==(MemAccessTy Other) const {
    return MemTy == Other.MemTy && AddrSpace == Other.AddrSpace;
  }

  bool operator!=(MemAccessTy Other) const { return !(*this == Other); }

  static MemAccessTy getUnknown(LLVMContext &Ctx,
                                unsigned AS = UnknownAddressSpace) {
    return MemAccessTy(Type::getVoidTy(Ctx), AS);
  }

  Type *getType() { return MemTy; }
};

/// This class holds data which is used to order reuse candidates.
class RegSortData {
public:
  /// This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

LLVM_DUMP_METHOD void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}
#endif
namespace {

/// Map register candidates to information about how they are used.
class RegUseTracker {
  using RegUsesTy = DenseMap<const SCEV *, RegSortData>;

  RegUsesTy RegUsesMap;
  SmallVector<const SCEV *, 16> RegSequence;

public:
  void countRegister(const SCEV *Reg, size_t LUIdx);
  void dropRegister(const SCEV *Reg, size_t LUIdx);
  void swapAndDropUse(size_t LUIdx, size_t LastLUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;

  void clear();

  using iterator = SmallVectorImpl<const SCEV *>::iterator;
  using const_iterator = SmallVectorImpl<const SCEV *>::const_iterator;

  iterator begin() { return RegSequence.begin(); }
  iterator end() { return RegSequence.end(); }
  const_iterator begin() const { return RegSequence.begin(); }
  const_iterator end() const { return RegSequence.end(); }
};

} // end anonymous namespace

void
RegUseTracker::countRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair =
      RegUsesMap.insert(std::make_pair(Reg, RegSortData()));
  RegSortData &RSD = Pair.first->second;
  if (Pair.second)
    RegSequence.push_back(Reg);
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);
}

void
RegUseTracker::dropRegister(const SCEV *Reg, size_t LUIdx) {
  RegUsesTy::iterator It = RegUsesMap.find(Reg);
  assert(It != RegUsesMap.end());
  RegSortData &RSD = It->second;
  assert(RSD.UsedByIndices.size() > LUIdx);
  RSD.UsedByIndices.reset(LUIdx);
}

void
RegUseTracker::swapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
  assert(LUIdx <= LastLUIdx);

  // Update RegUses. The data structure is not optimized for this purpose;
  // we must iterate through it and update each of the bit vectors.
  for (auto &Pair : RegUsesMap) {
    SmallBitVector &UsedByIndices = Pair.second.UsedByIndices;
    if (LUIdx < UsedByIndices.size())
      UsedByIndices[LUIdx] =
          LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : false;
    UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx));
  }
}

bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  if (I == RegUsesMap.end())
    return false;
  const SmallBitVector &UsedByIndices = I->second.UsedByIndices;
  int i = UsedByIndices.find_first();
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;
  return UsedByIndices.find_next(i) != -1;
}

const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  assert(I != RegUsesMap.end() && "Unknown register!");
  return I->second.UsedByIndices;
}

void RegUseTracker::clear() {
  RegUsesMap.clear();
  RegSequence.clear();
}
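
// For illustration, suppose LSR uses 0 and 2 both reference the register
// {0,+,1}<%loop>. After the corresponding countRegister calls, the
// RegSortData entry for that SCEV holds UsedByIndices = {0, 2}, so
// isRegUsedByUsesOtherThan({0,+,1}<%loop>, 0) returns true, while the same
// query on a register referenced only by use 0 returns false.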
namespace {

/// This class holds information that describes a formula for computing a
/// value that satisfies a use. It may include broken-out immediates and
/// scaled registers.
struct Formula {
  /// Global base address used for complex addressing.
  GlobalValue *BaseGV = nullptr;

  /// Base offset for complex addressing.
  int64_t BaseOffset = 0;

  /// Whether any complex addressing has a base register.
  bool HasBaseReg = false;

  /// The scale of any complex addressing.
  int64_t Scale = 0;

  /// The list of "base" registers for this use. When this is non-empty, the
  /// canonical representation of a formula is
  /// 1. BaseRegs.size > 1 implies ScaledReg != NULL and
  /// 2. ScaledReg != NULL implies Scale != 1 || !BaseRegs.empty().
  /// 3. The reg containing the recurrent expr related to the current loop in
  ///    the formula should be put in the ScaledReg.
  /// #1 enforces that the scaled register is always used when at least two
  /// registers are needed by the formula: e.g., reg1 + reg2 is reg1 + 1 * reg2.
  /// #2 enforces that 1 * reg is reg.
  /// #3 ensures invariant regs with respect to the current loop can be
  /// combined together in LSR codegen.
  /// This invariant can be temporarily broken while building a formula.
  /// However, every formula inserted into the LSRInstance must be in canonical
  /// form.
  SmallVector<const SCEV *, 4> BaseRegs;

  /// The 'scaled' register for this use. This should be non-null when Scale is
  /// not zero.
  const SCEV *ScaledReg = nullptr;

  /// An additional constant offset which is added near the use. This requires
  /// a temporary register, but the offset itself can live in an add immediate
  /// field rather than a register.
  int64_t UnfoldedOffset = 0;

  Formula() = default;

  void initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);

  bool isCanonical(const Loop &L) const;

  void canonicalize(const Loop &L);

  bool unscale();

  bool hasZeroEnd() const;

  size_t getNumRegs() const;
  Type *getType() const;

  void deleteBaseReg(const SCEV *&S);

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace
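
// As a rough illustration, the address expression %base + 8 + 4 * %i (with
// %i the canonical IV of the current loop) would typically be described by a
// Formula with BaseRegs = { reg(%base) }, BaseOffset = 8, Scale = 4, and
// ScaledReg = {0,+,1}<%loop>; Formula::print would render it as
// "8 + reg(%base) + 4*reg({0,+,1}<%loop>)".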
/// Recursion helper for initialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE) {
  // Collect expressions which properly dominate the loop header.
  if (SE.properlyDominates(S, L->getHeader())) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (const SCEV *S : Add->operands())
      DoInitialMatch(S, L, Good, Bad, SE);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero() && AR->isAffine()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
      DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                      AR->getStepRecurrence(SE),
                                      // FIXME: AR->getNoWrapFlags()
                                      AR->getLoop(), SCEV::FlagAnyWrap),
                     L, Good, Bad, SE);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getOperand(0)->isAllOnesValue()) {
      SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end());
      const SCEV *NewMul = SE.getMulExpr(Ops);

      SmallVector<const SCEV *, 4> MyGood;
      SmallVector<const SCEV *, 4> MyBad;
      DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
      const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
        SE.getEffectiveSCEVType(NewMul->getType())));
      for (const SCEV *S : MyGood)
        Good.push_back(SE.getMulExpr(NegOne, S));
      for (const SCEV *S : MyBad)
        Bad.push_back(SE.getMulExpr(NegOne, S));
      return;
    }

  // Ok, we can't do anything interesting. Just stuff the whole thing into a
  // register and hope for the best.
  Bad.push_back(S);
}

/// Incorporate loop-variant parts of S into this Formula, attempting to keep
/// all loop-invariant and loop-computable values in a single base register.
void Formula::initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
  SmallVector<const SCEV *, 4> Good;
  SmallVector<const SCEV *, 4> Bad;
  DoInitialMatch(S, L, Good, Bad, SE);
  if (!Good.empty()) {
    const SCEV *Sum = SE.getAddExpr(Good);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    HasBaseReg = true;
  }
  if (!Bad.empty()) {
    const SCEV *Sum = SE.getAddExpr(Bad);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    HasBaseReg = true;
  }
  canonicalize(*L);
}

/// Check whether or not this formula satisfies the canonical
/// representation.
/// \see Formula::BaseRegs.
bool Formula::isCanonical(const Loop &L) const {
  if (!ScaledReg)
    return BaseRegs.size() <= 1;

  if (Scale != 1)
    return true;

  if (Scale == 1 && BaseRegs.empty())
    return false;

  const SCEVAddRecExpr *SAR = dyn_cast<const SCEVAddRecExpr>(ScaledReg);
  if (SAR && SAR->getLoop() == &L)
    return true;

  // If ScaledReg is not a recurrent expr, or it is but its loop is not the
  // current loop, while BaseRegs contains a recurrent expr reg related to the
  // current loop, we want to swap the reg in BaseRegs with ScaledReg.
  auto I =
      find_if(make_range(BaseRegs.begin(), BaseRegs.end()), [&](const SCEV *S) {
        return isa<const SCEVAddRecExpr>(S) &&
               (cast<SCEVAddRecExpr>(S)->getLoop() == &L);
      });
  return I == BaseRegs.end();
}

/// Helper method to morph a formula into its canonical representation.
/// \see Formula::BaseRegs.
/// Every formula having more than one base register must use the ScaledReg
/// field. Otherwise, we would have to do special cases everywhere in LSR
/// to treat reg1 + reg2 + ... the same way as reg1 + 1*reg2 + ...
/// On the other hand, 1*reg should be canonicalized into reg.
void Formula::canonicalize(const Loop &L) {
  if (isCanonical(L))
    return;
  // So far we did not need this case. This is easy to implement but it is
  // useless to maintain dead code. Besides, it could hurt compile time.
  assert(!BaseRegs.empty() && "1*reg => reg, should not be needed.");

  // Keep the invariant sum in BaseRegs and one of the variant sums in
  // ScaledReg.
  if (!ScaledReg) {
    ScaledReg = BaseRegs.back();
    BaseRegs.pop_back();
    Scale = 1;
  }

  // If ScaledReg is an invariant with respect to L, find the reg from
  // BaseRegs containing the recurrent expr related with Loop L. Swap the
  // reg with ScaledReg.
  const SCEVAddRecExpr *SAR = dyn_cast<const SCEVAddRecExpr>(ScaledReg);
  if (!SAR || SAR->getLoop() != &L) {
    auto I = find_if(make_range(BaseRegs.begin(), BaseRegs.end()),
                     [&](const SCEV *S) {
                       return isa<const SCEVAddRecExpr>(S) &&
                              (cast<SCEVAddRecExpr>(S)->getLoop() == &L);
                     });
    if (I != BaseRegs.end())
      std::swap(ScaledReg, *I);
  }
}
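
// For example, assuming %a is loop-invariant, a formula built with
// BaseRegs = { {0,+,1}<%loop>, reg(%a) } and no ScaledReg is canonicalized in
// two steps: reg(%a), the last base register, is first moved into ScaledReg
// with Scale = 1; then, because reg(%a) is not an addrec for the current
// loop, it is swapped with {0,+,1}<%loop>. The result is
// BaseRegs = { reg(%a) }, ScaledReg = {0,+,1}<%loop>, Scale = 1, i.e.
// reg(%a) + 1*reg({0,+,1}<%loop>).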
/// Get rid of the scale in the formula.
/// In other words, this method morphs reg1 + 1*reg2 into reg1 + reg2.
/// \return true if it was possible to get rid of the scale, false otherwise.
/// \note After this operation the formula may not be in the canonical form.
bool Formula::unscale() {
  if (Scale != 1)
    return false;
  Scale = 0;
  BaseRegs.push_back(ScaledReg);
  ScaledReg = nullptr;
  return true;
}

bool Formula::hasZeroEnd() const {
  if (UnfoldedOffset || BaseOffset)
    return false;
  if (BaseRegs.size() != 1 || ScaledReg)
    return false;
  return true;
}

/// Return the total number of register operands used by this formula. This
/// does not include register uses implied by non-constant addrec strides.
size_t Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}

/// Return the type of this formula, if it has one, or null otherwise. This
/// type is meaningless except for the bit size.
Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         BaseGV ? BaseGV->getType() :
         nullptr;
}

/// Delete the given base reg from the BaseRegs list.
void Formula::deleteBaseReg(const SCEV *&S) {
  if (&S != &BaseRegs.back())
    std::swap(S, BaseRegs.back());
  BaseRegs.pop_back();
}

/// Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg || is_contained(BaseRegs, S);
}

/// Test whether this formula uses registers which are used by uses other than
/// the use with the given index.
bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
  if (ScaledReg)
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
      return true;
  for (const SCEV *BaseReg : BaseRegs)
    if (RegUses.isRegUsedByUsesOtherThan(BaseReg, LUIdx))
      return true;
  return false;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Formula::print(raw_ostream &OS) const {
  bool First = true;
  if (BaseGV) {
    if (!First) OS << " + "; else First = false;
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
  }
  if (BaseOffset != 0) {
    if (!First) OS << " + "; else First = false;
    OS << BaseOffset;
  }
  for (const SCEV *BaseReg : BaseRegs) {
    if (!First) OS << " + "; else First = false;
    OS << "reg(" << *BaseReg << ')';
  }
  if (HasBaseReg && BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: HasBaseReg**";
  } else if (!HasBaseReg && !BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: !HasBaseReg**";
  }
  if (Scale != 0) {
    if (!First) OS << " + "; else First = false;
    OS << Scale << "*reg(";
    if (ScaledReg)
      OS << *ScaledReg;
    else
      OS << "<unknown>";
    OS << ')';
  }
  if (UnfoldedOffset != 0) {
    if (!First) OS << " + ";
    OS << "imm(" << UnfoldedOffset << ')';
  }
}

LLVM_DUMP_METHOD void Formula::dump() const {
  print(errs()); errs() << '\n';
}
#endif

/// Return true if the given addrec can be sign-extended without changing its
/// value.
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  Type *WideTy =
      IntegerType::get(SE.getContext(),
                       SE.getTypeSizeInBits(AR->getType()) + 1);
  return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}

/// Return true if the given add can be sign-extended without changing its
/// value.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
  Type *WideTy =
      IntegerType::get(SE.getContext(),
                       SE.getTypeSizeInBits(A->getType()) + 1);
  return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}

/// Return true if the given mul can be sign-extended without changing its
/// value.
static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
  Type *WideTy =
      IntegerType::get(SE.getContext(),
                       SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
  return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
}

/// Return an expression for LHS /s RHS, if it can be determined and if the
/// remainder is known to be zero, or null otherwise. If IgnoreSignificantBits
/// is true, expressions like (X * Y) /s Y are simplified to X, ignoring that
/// the multiplication may overflow, which is useful when the result will be
/// used in a context where the most significant bits are ignored.
static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
                                ScalarEvolution &SE,
                                bool IgnoreSignificantBits = false) {
  // Handle the trivial case, which works for any SCEV type.
  if (LHS == RHS)
    return SE.getConstant(LHS->getType(), 1);

  // Handle a few RHS special cases.
  const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
  if (RC) {
    const APInt &RA = RC->getAPInt();
    // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
    // some folding.
    if (RA.isAllOnesValue())
      return SE.getMulExpr(LHS, RC);
    // Handle x /s 1 as x.
    if (RA == 1)
      return LHS;
  }

  // Check for a division of a constant by a constant.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
    if (!RC)
      return nullptr;
    const APInt &LA = C->getAPInt();
    const APInt &RA = RC->getAPInt();
    if (LA.srem(RA) != 0)
      return nullptr;
    return SE.getConstant(LA.sdiv(RA));
  }

  // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
    if ((IgnoreSignificantBits || isAddRecSExtable(AR, SE)) && AR->isAffine()) {
      const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
                                      IgnoreSignificantBits);
      if (!Step) return nullptr;
      const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
                                       IgnoreSignificantBits);
      if (!Start) return nullptr;
      // FlagNW is independent of the start value, step direction, and is
      // preserved with smaller magnitude steps.
      // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
      return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap);
    }
    return nullptr;
  }

  // Distribute the sdiv over add operands, if the add doesn't overflow.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
      SmallVector<const SCEV *, 8> Ops;
      for (const SCEV *S : Add->operands()) {
        const SCEV *Op = getExactSDiv(S, RHS, SE, IgnoreSignificantBits);
        if (!Op) return nullptr;
        Ops.push_back(Op);
      }
      return SE.getAddExpr(Ops);
    }
    return nullptr;
  }

  // Check for a multiply operand that we can pull RHS out of.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) {
    if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
      SmallVector<const SCEV *, 4> Ops;
      bool Found = false;
      for (const SCEV *S : Mul->operands()) {
        if (!Found)
          if (const SCEV *Q = getExactSDiv(S, RHS, SE,
                                           IgnoreSignificantBits)) {
            S = Q;
            Found = true;
          }
        Ops.push_back(S);
      }
      return Found ? SE.getMulExpr(Ops) : nullptr;
    }
    return nullptr;
  }

  // Otherwise we don't know.
  return nullptr;
}
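
// For example, getExactSDiv({8,+,4}<%loop>, 4, SE) distributes over the
// addrec and yields {2,+,1}<%loop>, while getExactSDiv({7,+,4}<%loop>, 4, SE)
// returns null because 7 srem 4 != 0. With IgnoreSignificantBits set, a
// query like (%n * %m) /s %m simplifies to %n even though the multiply might
// overflow.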
/// If S involves the addition of a constant integer value, return that integer
/// value, and mutate S to point to a new SCEV with that value excluded.
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    if (C->getAPInt().getMinSignedBits() <= 64) {
      S = SE.getConstant(C->getType(), 0);
      return C->getValue()->getSExtValue();
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return 0;
}
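
// For example, if S is {(4 + %x),+,1}<%loop>, ExtractImmediate returns 4 and
// mutates S to {%x,+,1}<%loop> (with the wrap flags conservatively cleared).
// ExtractSymbol below behaves analogously for a GlobalValue, e.g. peeling
// @gv out of (@gv + %x).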
/// If S involves the addition of a GlobalValue address, return that symbol,
/// and mutate S to point to a new SCEV with that value excluded.
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
      S = SE.getConstant(GV->getType(), 0);
      return GV;
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
    if (Result)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
    if (Result)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return nullptr;
}

/// Returns true if the specified instruction is using the specified value as
/// an address.
static bool isAddressUse(const TargetTransformInfo &TTI,
                         Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getPointerOperand() == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    case Intrinsic::memset:
    case Intrinsic::prefetch:
      if (II->getArgOperand(0) == OperandVal)
        isAddress = true;
      break;
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      if (II->getArgOperand(0) == OperandVal ||
          II->getArgOperand(1) == OperandVal)
        isAddress = true;
      break;
    default: {
      MemIntrinsicInfo IntrInfo;
      if (TTI.getTgtMemIntrinsic(II, IntrInfo)) {
        if (IntrInfo.PtrVal == OperandVal)
          isAddress = true;
      }
    }
    }
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {
    if (RMW->getPointerOperand() == OperandVal)
      isAddress = true;
  } else if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
    if (CmpX->getPointerOperand() == OperandVal)
      isAddress = true;
  }
  return isAddress;
}

/// Return the type of the memory being accessed.
static MemAccessTy getAccessType(const TargetTransformInfo &TTI,
                                 Instruction *Inst, Value *OperandVal) {
  MemAccessTy AccessTy(Inst->getType(), MemAccessTy::UnknownAddressSpace);
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    AccessTy.MemTy = SI->getOperand(0)->getType();
    AccessTy.AddrSpace = SI->getPointerAddressSpace();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    AccessTy.AddrSpace = LI->getPointerAddressSpace();
  } else if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {
    AccessTy.AddrSpace = RMW->getPointerAddressSpace();
  } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
    AccessTy.AddrSpace = CmpX->getPointerAddressSpace();
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::prefetch:
    case Intrinsic::memset:
      AccessTy.AddrSpace =
          II->getArgOperand(0)->getType()->getPointerAddressSpace();
      AccessTy.MemTy = OperandVal->getType();
      break;
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      AccessTy.AddrSpace = OperandVal->getType()->getPointerAddressSpace();
      AccessTy.MemTy = OperandVal->getType();
      break;
    default: {
      MemIntrinsicInfo IntrInfo;
      if (TTI.getTgtMemIntrinsic(II, IntrInfo) && IntrInfo.PtrVal) {
        AccessTy.AddrSpace
          = IntrInfo.PtrVal->getType()->getPointerAddressSpace();
      }

      break;
    }
    }
  }

  // All pointers have the same requirements, so canonicalize them to an
  // arbitrary pointer type to minimize variation.
  if (PointerType *PTy = dyn_cast<PointerType>(AccessTy.MemTy))
    AccessTy.MemTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
                                      PTy->getAddressSpace());

  return AccessTy;
}

/// Return true if this AddRec is already a phi in its loop.
static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  for (PHINode &PN : AR->getLoop()->getHeader()->phis()) {
    if (SE.isSCEVable(PN.getType()) &&
        (SE.getEffectiveSCEVType(PN.getType()) ==
         SE.getEffectiveSCEVType(AR->getType())) &&
        SE.getSCEV(&PN) == AR)
      return true;
  }
  return false;
}

/// Check if expanding this expression is likely to incur significant cost.
/// This is tricky because SCEV doesn't track which expressions are actually
/// computed by the current IR.
///
/// We currently allow expansion of IV increments that involve adds,
/// multiplication by constants, and AddRecs from existing phis.
///
/// TODO: Allow UDivExpr if we can find an existing IV increment that is an
/// obvious multiple of the UDivExpr.
static bool isHighCostExpansion(const SCEV *S,
                                SmallPtrSetImpl<const SCEV*> &Processed,
                                ScalarEvolution &SE) {
  // Zero/One operand expressions
  switch (S->getSCEVType()) {
  case scUnknown:
  case scConstant:
    return false;
  case scTruncate:
    return isHighCostExpansion(cast<SCEVTruncateExpr>(S)->getOperand(),
                               Processed, SE);
  case scZeroExtend:
    return isHighCostExpansion(cast<SCEVZeroExtendExpr>(S)->getOperand(),
                               Processed, SE);
  case scSignExtend:
    return isHighCostExpansion(cast<SCEVSignExtendExpr>(S)->getOperand(),
                               Processed, SE);
  }

  if (!Processed.insert(S).second)
    return false;

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (const SCEV *S : Add->operands()) {
      if (isHighCostExpansion(S, Processed, SE))
        return true;
    }
    return false;
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    if (Mul->getNumOperands() == 2) {
      // Multiplication by a constant is ok
      if (isa<SCEVConstant>(Mul->getOperand(0)))
        return isHighCostExpansion(Mul->getOperand(1), Processed, SE);

      // If we have the value of one operand, check if an existing
      // multiplication already generates this expression.
      if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Mul->getOperand(1))) {
        Value *UVal = U->getValue();
        for (User *UR : UVal->users()) {
          // If U is a constant, it may be used by a ConstantExpr.
          Instruction *UI = dyn_cast<Instruction>(UR);
          if (UI && UI->getOpcode() == Instruction::Mul &&
              SE.isSCEVable(UI->getType())) {
            return SE.getSCEV(UI) == Mul;
          }
        }
      }
    }
  }

  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    if (isExistingPhi(AR, SE))
      return false;
  }

  // For now, consider any other type of expression (div/mul/min/max) high cost.
  return true;
}
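
// For example, %n (a SCEVUnknown), sext(%n), and 2 * %n are all considered
// cheap to expand here, an addrec is cheap only when it already exists as a
// phi in its loop, and anything else, such as %n /u 8, is conservatively
// treated as high cost.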
/// If any of the instructions in the specified set are trivially dead, delete
/// them and see if this makes any of their operands subsequently dead.
static bool
DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
  bool Changed = false;

  while (!DeadInsts.empty()) {
    Value *V = DeadInsts.pop_back_val();
    Instruction *I = dyn_cast_or_null<Instruction>(V);

    if (!I || !isInstructionTriviallyDead(I))
      continue;

    for (Use &O : I->operands())
      if (Instruction *U = dyn_cast<Instruction>(O)) {
        O = nullptr;
        if (U->use_empty())
          DeadInsts.emplace_back(U);
      }

    I->eraseFromParent();
    Changed = true;
  }

  return Changed;
}

namespace {

class LSRUse;

} // end anonymous namespace

/// Check if the addressing mode defined by \p F is completely
/// folded in \p LU at isel time.
/// This includes address-mode folding and special icmp tricks.
/// This function returns true if \p LU can accommodate what \p F
/// defines and up to 1 base + 1 scaled + offset.
/// In other words, if \p F has several base registers, this function may
/// still return true. Therefore, users still need to account for
/// additional base registers and/or unfolded offsets to derive an
/// accurate cost model.
static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 const LSRUse &LU, const Formula &F);

// Get the cost of the scaling factor used in F for LU.
static unsigned getScalingFactorCost(const TargetTransformInfo &TTI,
                                     const LSRUse &LU, const Formula &F,
                                     const Loop &L);

namespace {

/// This class is used to measure and compare candidate formulae.
class Cost {
  TargetTransformInfo::LSRCost C;

public:
  Cost() {
    C.Insns = 0;
    C.NumRegs = 0;
    C.AddRecCost = 0;
    C.NumIVMuls = 0;
    C.NumBaseAdds = 0;
    C.ImmCost = 0;
    C.SetupCost = 0;
    C.ScaleCost = 0;
  }

  bool isLess(Cost &Other, const TargetTransformInfo &TTI);

  void Lose();

#ifndef NDEBUG
  // Once any of the metrics loses, they must all remain losers.
  bool isValid() {
    return ((C.Insns | C.NumRegs | C.AddRecCost | C.NumIVMuls | C.NumBaseAdds
             | C.ImmCost | C.SetupCost | C.ScaleCost) != ~0u)
        || ((C.Insns & C.NumRegs & C.AddRecCost & C.NumIVMuls & C.NumBaseAdds
             & C.ImmCost & C.SetupCost & C.ScaleCost) == ~0u);
  }
#endif

  bool isLoser() {
    assert(isValid() && "invalid cost");
    return C.NumRegs == ~0u;
  }

  void RateFormula(const TargetTransformInfo &TTI,
                   const Formula &F,
                   SmallPtrSetImpl<const SCEV *> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs,
                   const Loop *L,
                   ScalarEvolution &SE, DominatorTree &DT,
                   const LSRUse &LU,
                   SmallPtrSetImpl<const SCEV *> *LoserRegs = nullptr);

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  void RateRegister(const SCEV *Reg,
                    SmallPtrSetImpl<const SCEV *> &Regs,
                    const Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT,
                    const TargetTransformInfo &TTI);
  void RatePrimaryRegister(const SCEV *Reg,
                           SmallPtrSetImpl<const SCEV *> &Regs,
                           const Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT,
                           SmallPtrSetImpl<const SCEV *> *LoserRegs,
                           const TargetTransformInfo &TTI);
};

/// An operand value in an instruction which is to be replaced with some
/// equivalent, possibly strength-reduced, replacement.
struct LSRFixup {
  /// The instruction which will be updated.
  Instruction *UserInst = nullptr;

  /// The operand of the instruction which will be replaced. The operand may be
  /// used more than once; every instance will be replaced.
  Value *OperandValToReplace = nullptr;

  /// If this user is to use the post-incremented value of an induction
  /// variable, this set is non-empty and holds the loops associated with the
  /// induction variable.
  PostIncLoopSet PostIncLoops;

  /// A constant offset to be added to the LSRUse expression. This allows
  /// multiple fixups to share the same LSRUse with different offsets, for
  /// example in an unrolled loop.
  int64_t Offset = 0;

  LSRFixup() = default;

  bool isUseFullyOutsideLoop(const Loop *L) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

/// A DenseMapInfo implementation for holding DenseMaps and DenseSets of sorted
/// SmallVectors of const SCEV*.
struct UniquifierDenseMapInfo {
  static SmallVector<const SCEV *, 4> getEmptyKey() {
    SmallVector<const SCEV *, 4> V;
    V.push_back(reinterpret_cast<const SCEV *>(-1));
    return V;
  }

  static SmallVector<const SCEV *, 4> getTombstoneKey() {
    SmallVector<const SCEV *, 4> V;
    V.push_back(reinterpret_cast<const SCEV *>(-2));
    return V;
  }

  static unsigned getHashValue(const SmallVector<const SCEV *, 4> &V) {
    return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
  }

  static bool isEqual(const SmallVector<const SCEV *, 4> &LHS,
                      const SmallVector<const SCEV *, 4> &RHS) {
    return LHS == RHS;
  }
};

/// This class holds the state that LSR keeps for each use in IVUsers, as well
/// as uses invented by LSR itself. It includes information about what kinds of
/// things can be folded into the user, information about the user itself, and
/// information about how the use may be satisfied. TODO: Represent multiple
/// users of the same expression in common?
class LSRUse {
  DenseSet<SmallVector<const SCEV *, 4>, UniquifierDenseMapInfo> Uniquifier;

public:
  /// An enum for a kind of use, indicating what types of scaled and immediate
  /// operands it might support.
  enum KindType {
    Basic,   ///< A normal use, with no folding.
    Special, ///< A special case of basic, allowing -1 scales.
    Address, ///< An address use; folding according to TargetLowering
    ICmpZero ///< An equality icmp with both operands folded into one.
    // TODO: Add a generic icmp too?
  };

  using SCEVUseKindPair = PointerIntPair<const SCEV *, 2, KindType>;

  KindType Kind;
  MemAccessTy AccessTy;

  /// The list of operands which are to be replaced.
  SmallVector<LSRFixup, 8> Fixups;

  /// Keep track of the min and max offsets of the fixups.
  int64_t MinOffset = std::numeric_limits<int64_t>::max();
  int64_t MaxOffset = std::numeric_limits<int64_t>::min();

  /// This records whether all of the fixups using this LSRUse are outside of
  /// the loop, in which case some special-case heuristics may be used.
  bool AllFixupsOutsideLoop = true;

  /// RigidFormula is set to true to guarantee that this use will be associated
  /// with a single formula--the one that initially matched. Some SCEV
  /// expressions cannot be expanded. This allows LSR to consider the registers
  /// used by those expressions without the need to expand them later after
  /// changing the formula.
  bool RigidFormula = false;

  /// This records the widest use type for any fixup using this
  /// LSRUse. FindUseWithSimilarFormula can't consider uses with different max
  /// fixup widths to be equivalent, because the narrower one may be relying on
  /// the implicit truncation to truncate away bogus bits.
  Type *WidestFixupType = nullptr;

  /// A list of ways to build a value that can satisfy this user. After the
  /// list is populated, one of these is selected heuristically and used to
  /// formulate a replacement for OperandValToReplace in UserInst.
  SmallVector<Formula, 12> Formulae;

  /// The set of register candidates used by all formulae in this LSRUse.
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, MemAccessTy AT) : Kind(K), AccessTy(AT) {}

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());
    return Fixups.back();
  }

  void pushFixup(LSRFixup &f) {
    Fixups.push_back(f);
    if (f.Offset > MaxOffset)
      MaxOffset = f.Offset;
    if (f.Offset < MinOffset)
      MinOffset = f.Offset;
  }

  bool HasFormulaWithSameRegs(const Formula &F) const;
  float getNotSelectedProbability(const SCEV *Reg) const;
  bool InsertFormula(const Formula &F, const Loop &L);
  void DeleteFormula(Formula &F);
  void RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses);

  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace

static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
                                 GlobalValue *BaseGV, int64_t BaseOffset,
                                 bool HasBaseReg, int64_t Scale,
                                 Instruction *Fixup = nullptr);

/// Tally up interesting quantities from the given register.
void Cost::RateRegister(const SCEV *Reg,
                        SmallPtrSetImpl<const SCEV *> &Regs,
                        const Loop *L,
                        ScalarEvolution &SE, DominatorTree &DT,
                        const TargetTransformInfo &TTI) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    // If this is an addrec for another loop, it should be an invariant
    // with respect to L since L is the innermost loop (at least
    // for now LSR only handles innermost loops).
    if (AR->getLoop() != L) {
      // If the AddRec exists, consider its register free and leave it alone.
      if (isExistingPhi(AR, SE))
        return;

      // It is bad to allow LSR for the current loop to add induction
      // variables for its sibling loops.
      if (!AR->getLoop()->contains(L)) {
        Lose();
        return;
      }

      // Otherwise, it will be an invariant with respect to Loop L.
      ++C.NumRegs;
      return;
    }

    unsigned LoopCost = 1;
    if (TTI.shouldFavorPostInc()) {
      const SCEV *LoopStep = AR->getStepRecurrence(SE);
      if (isa<SCEVConstant>(LoopStep)) {
        // Check if a post-indexed load/store can be used.
        if (TTI.isIndexedLoadLegal(TTI.MIM_PostInc, AR->getType()) ||
            TTI.isIndexedStoreLegal(TTI.MIM_PostInc, AR->getType())) {
          const SCEV *LoopStart = AR->getStart();
          if (!isa<SCEVConstant>(LoopStart) &&
              SE.isLoopInvariant(LoopStart, L))
            LoopCost = 0;
        }
      }
    }
    C.AddRecCost += LoopCost;

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1))) {
      if (!Regs.count(AR->getOperand(1))) {
        RateRegister(AR->getOperand(1), Regs, L, SE, DT, TTI);
        if (isLoser())
          return;
      }
    }
  }
  ++C.NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
  if (!isa<SCEVUnknown>(Reg) &&
      !isa<SCEVConstant>(Reg) &&
      !(isa<SCEVAddRecExpr>(Reg) &&
        (isa<SCEVUnknown>(cast<SCEVAddRecExpr>(Reg)->getStart()) ||
         isa<SCEVConstant>(cast<SCEVAddRecExpr>(Reg)->getStart()))))
    ++C.SetupCost;

  C.NumIVMuls += isa<SCEVMulExpr>(Reg) &&
                 SE.hasComputableLoopEvolution(Reg, L);
}
/// Record this register in the set. If we haven't seen it before, rate
/// it. Optional LoserRegs provides a way to declare any formula that refers to
/// one of those regs an instant loser.
void Cost::RatePrimaryRegister(const SCEV *Reg,
                               SmallPtrSetImpl<const SCEV *> &Regs,
                               const Loop *L,
                               ScalarEvolution &SE, DominatorTree &DT,
                               SmallPtrSetImpl<const SCEV *> *LoserRegs,
                               const TargetTransformInfo &TTI) {
  if (LoserRegs && LoserRegs->count(Reg)) {
    Lose();
    return;
  }
  if (Regs.insert(Reg).second) {
    RateRegister(Reg, Regs, L, SE, DT, TTI);
    if (LoserRegs && isLoser())
      LoserRegs->insert(Reg);
  }
}

void Cost::RateFormula(const TargetTransformInfo &TTI,
                       const Formula &F,
                       SmallPtrSetImpl<const SCEV *> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const Loop *L,
                       ScalarEvolution &SE, DominatorTree &DT,
                       const LSRUse &LU,
                       SmallPtrSetImpl<const SCEV *> *LoserRegs) {
  assert(F.isCanonical(*L) && "Cost is accurate only for canonical formula");
  // Tally up the registers.
  unsigned PrevAddRecCost = C.AddRecCost;
  unsigned PrevNumRegs = C.NumRegs;
  unsigned PrevNumBaseAdds = C.NumBaseAdds;
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Lose();
      return;
    }
    RatePrimaryRegister(ScaledReg, Regs, L, SE, DT, LoserRegs, TTI);
    if (isLoser())
      return;
  }
  for (const SCEV *BaseReg : F.BaseRegs) {
    if (VisitedRegs.count(BaseReg)) {
      Lose();
      return;
    }
    RatePrimaryRegister(BaseReg, Regs, L, SE, DT, LoserRegs, TTI);
    if (isLoser())
      return;
  }

  // Determine how many (unfolded) adds we'll need inside the loop.
  size_t NumBaseParts = F.getNumRegs();
  if (NumBaseParts > 1)
    // Do not count the base and a possible second register if the target
    // allows to fold 2 registers.
    C.NumBaseAdds +=
        NumBaseParts - (1 + (F.Scale && isAMCompletelyFolded(TTI, LU, F)));
  C.NumBaseAdds += (F.UnfoldedOffset != 0);

  // Accumulate non-free scaling amounts.
  C.ScaleCost += getScalingFactorCost(TTI, LU, F, *L);

  // Tally up the non-zero immediates.
  for (const LSRFixup &Fixup : LU.Fixups) {
    int64_t O = Fixup.Offset;
    int64_t Offset = (uint64_t)O + F.BaseOffset;
    if (F.BaseGV)
      C.ImmCost += 64; // Handle symbolic values conservatively.
                       // TODO: This should probably be the pointer size.
    else if (Offset != 0)
      C.ImmCost += APInt(64, Offset, true).getMinSignedBits();

    // Check with target if this offset with this instruction is
    // specifically not supported.
    if (LU.Kind == LSRUse::Address && Offset != 0 &&
        !isAMCompletelyFolded(TTI, LSRUse::Address, LU.AccessTy, F.BaseGV,
                              Offset, F.HasBaseReg, F.Scale, Fixup.UserInst))
      C.NumBaseAdds++;
  }

  // If we don't count instruction cost, exit here.
  if (!InsnsCost) {
    assert(isValid() && "invalid cost");
    return;
  }

  // Treat every new register that exceeds TTI.getNumberOfRegisters() - 1 as
  // an additional instruction (at least fill).
  unsigned TTIRegNum = TTI.getNumberOfRegisters(false) - 1;
  if (C.NumRegs > TTIRegNum) {
    // If the cost already exceeded TTIRegNum, then only newly added registers
    // can add new instructions.
    if (PrevNumRegs > TTIRegNum)
      C.Insns += (C.NumRegs - PrevNumRegs);
    else
      C.Insns += (C.NumRegs - TTIRegNum);
  }

  // If an ICmpZero formula ends with a value other than 0, it cannot be
  // replaced by just an add or sub. We'll need to compare the final result of
  // the AddRec. That means we'll need an additional instruction. But if the
  // target can macro-fuse a compare with a branch, don't count this extra
  // instruction.
  // For -10 + {0, +, 1}:
  //   i = i + 1;
  //   cmp i, 10
  //
  // For {-10, +, 1}:
  //   i = i + 1;
  if (LU.Kind == LSRUse::ICmpZero && !F.hasZeroEnd() && !TTI.canMacroFuseCmp())
    C.Insns++;
  // Each new AddRec adds 1 instruction to calculation.
  C.Insns += (C.AddRecCost - PrevAddRecCost);

  // BaseAdds adds instructions for unfolded registers.
  if (LU.Kind != LSRUse::ICmpZero)
    C.Insns += C.NumBaseAdds - PrevNumBaseAdds;
  assert(isValid() && "invalid cost");
}
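
// As a rough illustration of the register-count heuristic above, assume TTI
// reports 8 scalar registers, so TTIRegNum = 7. Rating a formula that grows
// the tally from PrevNumRegs = 6 to C.NumRegs = 9 charges 9 - 7 = 2 extra
// instructions, while a later step from 9 to 10 charges only 10 - 9 = 1,
// since the first 7 registers are presumed free.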
/// Set this cost to a losing value.
void Cost::Lose() {
  C.Insns = std::numeric_limits<unsigned>::max();
  C.NumRegs = std::numeric_limits<unsigned>::max();
  C.AddRecCost = std::numeric_limits<unsigned>::max();
  C.NumIVMuls = std::numeric_limits<unsigned>::max();
  C.NumBaseAdds = std::numeric_limits<unsigned>::max();
  C.ImmCost = std::numeric_limits<unsigned>::max();
  C.SetupCost = std::numeric_limits<unsigned>::max();
  C.ScaleCost = std::numeric_limits<unsigned>::max();
}

/// Choose the lower cost.
bool Cost::isLess(Cost &Other, const TargetTransformInfo &TTI) {
  if (InsnsCost.getNumOccurrences() > 0 && InsnsCost &&
      C.Insns != Other.C.Insns)
    return C.Insns < Other.C.Insns;
  return TTI.isLSRCostLess(C, Other.C);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Cost::print(raw_ostream &OS) const {
  if (InsnsCost)
    OS << C.Insns << " instruction" << (C.Insns == 1 ? " " : "s ");
  OS << C.NumRegs << " reg" << (C.NumRegs == 1 ? "" : "s");
  if (C.AddRecCost != 0)
    OS << ", with addrec cost " << C.AddRecCost;
  if (C.NumIVMuls != 0)
    OS << ", plus " << C.NumIVMuls << " IV mul"
       << (C.NumIVMuls == 1 ? "" : "s");
  if (C.NumBaseAdds != 0)
    OS << ", plus " << C.NumBaseAdds << " base add"
       << (C.NumBaseAdds == 1 ? "" : "s");
  if (C.ScaleCost != 0)
    OS << ", plus " << C.ScaleCost << " scale cost";
  if (C.ImmCost != 0)
    OS << ", plus " << C.ImmCost << " imm cost";
  if (C.SetupCost != 0)
    OS << ", plus " << C.SetupCost << " setup cost";
}

LLVM_DUMP_METHOD void Cost::dump() const {
  print(errs()); errs() << '\n';
}
#endif

/// Test whether this fixup always uses its value outside of the given loop.
bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const {
  // PHI nodes use their value in their incoming blocks.
  if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (PN->getIncomingValue(i) == OperandValToReplace &&
          L->contains(PN->getIncomingBlock(i)))
        return false;
    return true;
  }

  return !L->contains(UserInst);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRFixup::print(raw_ostream &OS) const {
  OS << "UserInst=";
  // Store is common and interesting enough to be worth special-casing.
  if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) {
    OS << "store ";
    Store->getOperand(0)->printAsOperand(OS, /*PrintType=*/false);
  } else if (UserInst->getType()->isVoidTy())
    OS << UserInst->getOpcodeName();
  else
    UserInst->printAsOperand(OS, /*PrintType=*/false);

  OS << ", OperandValToReplace=";
  OperandValToReplace->printAsOperand(OS, /*PrintType=*/false);

  for (const Loop *PIL : PostIncLoops) {
    OS << ", PostIncLoop=";
    PIL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  }

  if (Offset != 0)
    OS << ", Offset=" << Offset;
}

LLVM_DUMP_METHOD void LSRFixup::dump() const {
  print(errs()); errs() << '\n';
}
#endif

/// Test whether this use has a formula which has the same registers as the
/// given formula.
bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
  SmallVector<const SCEV *, 4> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  llvm::sort(Key.begin(), Key.end());
  return Uniquifier.count(Key);
}

/// Return the probability of selecting a formula that does not reference Reg.
float LSRUse::getNotSelectedProbability(const SCEV *Reg) const {
  unsigned FNum = 0;
  for (const Formula &F : Formulae)
    if (F.referencesReg(Reg))
      FNum++;
  return ((float)(Formulae.size() - FNum)) / Formulae.size();
}
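
// For example, with four formulae of which one references Reg,
// getNotSelectedProbability(Reg) is (4 - 1) / 4 = 0.75; a register referenced
// by every formula gets probability 0. This feeds the search-space narrowing
// controlled by the lsr-exp-narrow flag above.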
/// If the given formula has not yet been inserted, add it to the list, and
/// return true. Return false otherwise. The formula must be in canonical form.
bool LSRUse::InsertFormula(const Formula &F, const Loop &L) {
  assert(F.isCanonical(L) && "Invalid canonical representation");

  if (!Formulae.empty() && RigidFormula)
    return false;

  SmallVector<const SCEV *, 4> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  llvm::sort(Key.begin(), Key.end());

  if (!Uniquifier.insert(Key).second)
    return false;

  // Using a register to hold the value of 0 is not profitable.
  assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
         "Zero allocated in a scaled register!");
#ifndef NDEBUG
  for (const SCEV *BaseReg : F.BaseRegs)
    assert(!BaseReg->isZero() && "Zero allocated in a base register!");
#endif

  // Add the formula to the list.
  Formulae.push_back(F);

  // Record registers now being used by this use.
  Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
  if (F.ScaledReg)
    Regs.insert(F.ScaledReg);

  return true;
}

/// Remove the given formula from this use's list.
void LSRUse::DeleteFormula(Formula &F) {
  if (&F != &Formulae.back())
    std::swap(F, Formulae.back());
  Formulae.pop_back();
}

/// Recompute the Regs field, and update RegUses.
void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
  // Now that we've filtered out some formulae, recompute the Regs set.
  SmallPtrSet<const SCEV *, 4> OldRegs = std::move(Regs);
  Regs.clear();
  for (const Formula &F : Formulae) {
    if (F.ScaledReg) Regs.insert(F.ScaledReg);
    Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
  }

  // Update the RegTracker.
  for (const SCEV *S : OldRegs)
    if (!Regs.count(S))
      RegUses.dropRegister(S, LUIdx);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRUse::print(raw_ostream &OS) const {
  OS << "LSR Use: Kind=";
  switch (Kind) {
  case Basic:    OS << "Basic"; break;
  case Special:  OS << "Special"; break;
  case ICmpZero: OS << "ICmpZero"; break;
  case Address:
    OS << "Address of ";
    if (AccessTy.MemTy->isPointerTy())
      OS << "pointer"; // the full pointer type could be really verbose
    else {
      OS << *AccessTy.MemTy;
    }

    OS << " in addrspace(" << AccessTy.AddrSpace << ')';
  }

  OS << ", Offsets={";
  bool NeedComma = false;
  for (const LSRFixup &Fixup : Fixups) {
    if (NeedComma) OS << ',';
    OS << Fixup.Offset;
    NeedComma = true;
  }
  OS << '}';

  if (AllFixupsOutsideLoop)
    OS << ", all-fixups-outside-loop";

  if (WidestFixupType)
    OS << ", widest fixup type: " << *WidestFixupType;
}

LLVM_DUMP_METHOD void LSRUse::dump() const {
  print(errs()); errs() << '\n';
}
#endif

static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
                                 GlobalValue *BaseGV, int64_t BaseOffset,
                                 bool HasBaseReg, int64_t Scale,
                                 Instruction *Fixup/*= nullptr*/) {
  switch (Kind) {
  case LSRUse::Address:
    return TTI.isLegalAddressingMode(AccessTy.MemTy, BaseGV, BaseOffset,
                                     HasBaseReg, Scale, AccessTy.AddrSpace,
                                     Fixup);

  case LSRUse::ICmpZero:
    // There's not even a target hook for querying whether it would be legal to
    // fold a GV into an ICmp.
    if (BaseGV)
      return false;

    // ICmp only has two operands; don't allow more than two non-trivial parts.
    if (Scale != 0 && HasBaseReg && BaseOffset != 0)
      return false;

    // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale
    // by putting the scaled register in the other operand of the icmp.
    if (Scale != 0 && Scale != -1)
      return false;

    // If we have low-level target information, ask the target if it can fold
    // an integer immediate on an icmp.
    if (BaseOffset != 0) {
      // We have one of:
      // ICmpZero     BaseReg + BaseOffset => ICmp BaseReg, -BaseOffset
      // ICmpZero -1*ScaleReg + BaseOffset => ICmp ScaleReg, BaseOffset
      // Offs is the ICmp immediate.
      if (Scale == 0)
        // The cast does the right thing with
        // std::numeric_limits<int64_t>::min().
        BaseOffset = -(uint64_t)BaseOffset;
      return TTI.isLegalICmpImmediate(BaseOffset);
    }

    // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg
    return true;

  case LSRUse::Basic:
    // Only handle single-register values.
    return !BaseGV && Scale == 0 && BaseOffset == 0;

  case LSRUse::Special:
    // Special case Basic to handle -1 scales.
    return !BaseGV && (Scale == 0 || Scale == -1) && BaseOffset == 0;
  }

  llvm_unreachable("Invalid LSRUse Kind!");
}

static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 int64_t MinOffset, int64_t MaxOffset,
                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
                                 GlobalValue *BaseGV, int64_t BaseOffset,
                                 bool HasBaseReg, int64_t Scale) {
  // Check for overflow.
  if (((int64_t)((uint64_t)BaseOffset + MinOffset) > BaseOffset) !=
      (MinOffset > 0))
    return false;
  MinOffset = (uint64_t)BaseOffset + MinOffset;
  if (((int64_t)((uint64_t)BaseOffset + MaxOffset) > BaseOffset) !=
      (MaxOffset > 0))
    return false;
  MaxOffset = (uint64_t)BaseOffset + MaxOffset;

  return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MinOffset,
                              HasBaseReg, Scale) &&
         isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MaxOffset,
                              HasBaseReg, Scale);
}
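
// The overflow check above relies on wrapped arithmetic: for example, with
// BaseOffset = INT64_MAX and MinOffset = 1, (uint64_t)BaseOffset + MinOffset
// wraps around to INT64_MIN, which is not greater than BaseOffset even though
// MinOffset > 0, so the mismatch signals overflow and the query fails safely.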
1660 if (((int64_t)((uint64_t)BaseOffset + MinOffset) > BaseOffset) !=
1661 (MinOffset > 0))
1662 return false;
1663 MinOffset = (uint64_t)BaseOffset + MinOffset;
1664 if (((int64_t)((uint64_t)BaseOffset + MaxOffset) > BaseOffset) !=
1665 (MaxOffset > 0))
1666 return false;
1667 MaxOffset = (uint64_t)BaseOffset + MaxOffset;
1668
1669 return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MinOffset,
1670 HasBaseReg, Scale) &&
1671 isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MaxOffset,
1672 HasBaseReg, Scale);
1673 }
1674
1675 static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1676 int64_t MinOffset, int64_t MaxOffset,
1677 LSRUse::KindType Kind, MemAccessTy AccessTy,
1678 const Formula &F, const Loop &L) {
1679 // For the purpose of isAMCompletelyFolded, either having a canonical formula
1680 // or a nonzero scale is acceptable.
1681 // Problems may arise from non-canonical formulae having a scale == 0.
1682 // Strictly speaking it would be best to just rely on canonical formulae.
1683 // However, when we generate the scaled formulae, we first check that the
1684 // scaling factor is profitable before computing the actual ScaledReg, for
1685 // compile-time's sake.
1686 assert((F.isCanonical(L) || F.Scale != 0));
1687 return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy,
1688 F.BaseGV, F.BaseOffset, F.HasBaseReg, F.Scale);
1689 }
1690
1691 /// Test whether we know how to expand the current formula.
1692 static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
1693 int64_t MaxOffset, LSRUse::KindType Kind,
1694 MemAccessTy AccessTy, GlobalValue *BaseGV,
1695 int64_t BaseOffset, bool HasBaseReg, int64_t Scale) {
1696 // We know how to expand completely foldable formulae.
1697 return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV,
1698 BaseOffset, HasBaseReg, Scale) ||
1699 // Or formulae that use a base register produced by a sum of base
1700 // registers.
1701 (Scale == 1 &&
1702 isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy,
1703 BaseGV, BaseOffset, true, 0));
1704 }
1705
1706 static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
1707 int64_t MaxOffset, LSRUse::KindType Kind,
1708 MemAccessTy AccessTy, const Formula &F) {
1709 return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, F.BaseGV,
1710 F.BaseOffset, F.HasBaseReg, F.Scale);
1711 }
1712
1713 static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1714 const LSRUse &LU, const Formula &F) {
1715 // Target may want to look at the user instructions.
1716 if (LU.Kind == LSRUse::Address && TTI.LSRWithInstrQueries()) {
1717 for (const LSRFixup &Fixup : LU.Fixups)
1718 if (!isAMCompletelyFolded(TTI, LSRUse::Address, LU.AccessTy, F.BaseGV,
1719 (F.BaseOffset + Fixup.Offset), F.HasBaseReg,
1720 F.Scale, Fixup.UserInst))
1721 return false;
1722 return true;
1723 }
1724
1725 return isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind,
1726 LU.AccessTy, F.BaseGV, F.BaseOffset, F.HasBaseReg,
1727 F.Scale);
1728 }
1729
1730 static unsigned getScalingFactorCost(const TargetTransformInfo &TTI,
1731 const LSRUse &LU, const Formula &F,
1732 const Loop &L) {
1733 if (!F.Scale)
1734 return 0;
1735
1736 // If the use is not completely folded in that instruction, we will have to
1737 // pay an extra cost only for scale != 1.
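// For illustration: a scale of 1 just adds ScaledReg as one more base
// register (reg + reg), which a plain add covers, whereas e.g. a scale of 4
// would need a separate multiply or scaled-address computation if the target
// cannot fold it into the memory operation.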
1738 if (!isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, 1739 LU.AccessTy, F, L)) 1740 return F.Scale != 1; 1741 1742 switch (LU.Kind) { 1743 case LSRUse::Address: { 1744 // Check the scaling factor cost with both the min and max offsets. 1745 int ScaleCostMinOffset = TTI.getScalingFactorCost( 1746 LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MinOffset, F.HasBaseReg, 1747 F.Scale, LU.AccessTy.AddrSpace); 1748 int ScaleCostMaxOffset = TTI.getScalingFactorCost( 1749 LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MaxOffset, F.HasBaseReg, 1750 F.Scale, LU.AccessTy.AddrSpace); 1751 1752 assert(ScaleCostMinOffset >= 0 && ScaleCostMaxOffset >= 0 && 1753 "Legal addressing mode has an illegal cost!"); 1754 return std::max(ScaleCostMinOffset, ScaleCostMaxOffset); 1755 } 1756 case LSRUse::ICmpZero: 1757 case LSRUse::Basic: 1758 case LSRUse::Special: 1759 // The use is completely folded, i.e., everything is folded into the 1760 // instruction. 1761 return 0; 1762 } 1763 1764 llvm_unreachable("Invalid LSRUse Kind!"); 1765 } 1766 1767 static bool isAlwaysFoldable(const TargetTransformInfo &TTI, 1768 LSRUse::KindType Kind, MemAccessTy AccessTy, 1769 GlobalValue *BaseGV, int64_t BaseOffset, 1770 bool HasBaseReg) { 1771 // Fast-path: zero is always foldable. 1772 if (BaseOffset == 0 && !BaseGV) return true; 1773 1774 // Conservatively, create an address with an immediate and a 1775 // base and a scale. 1776 int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1; 1777 1778 // Canonicalize a scale of 1 to a base register if the formula doesn't 1779 // already have a base register. 1780 if (!HasBaseReg && Scale == 1) { 1781 Scale = 0; 1782 HasBaseReg = true; 1783 } 1784 1785 return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, BaseOffset, 1786 HasBaseReg, Scale); 1787 } 1788 1789 static bool isAlwaysFoldable(const TargetTransformInfo &TTI, 1790 ScalarEvolution &SE, int64_t MinOffset, 1791 int64_t MaxOffset, LSRUse::KindType Kind, 1792 MemAccessTy AccessTy, const SCEV *S, 1793 bool HasBaseReg) { 1794 // Fast-path: zero is always foldable. 1795 if (S->isZero()) return true; 1796 1797 // Conservatively, create an address with an immediate and a 1798 // base and a scale. 1799 int64_t BaseOffset = ExtractImmediate(S, SE); 1800 GlobalValue *BaseGV = ExtractSymbol(S, SE); 1801 1802 // If there's anything else involved, it's not foldable. 1803 if (!S->isZero()) return false; 1804 1805 // Fast-path: zero is always foldable. 1806 if (BaseOffset == 0 && !BaseGV) return true; 1807 1808 // Conservatively, create an address with an immediate and a 1809 // base and a scale. 1810 int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1; 1811 1812 return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV, 1813 BaseOffset, HasBaseReg, Scale); 1814 } 1815 1816 namespace { 1817 1818 /// An individual increment in a Chain of IV increments. Relate an IV user to 1819 /// an expression that computes the IV it uses from the IV used by the previous 1820 /// link in the Chain. 1821 /// 1822 /// For the head of a chain, IncExpr holds the absolute SCEV expression for the 1823 /// original IVOperand. The head of the chain's IVOperand is only valid during 1824 /// chain collection, before LSR replaces IV users. During chain generation, 1825 /// IncExpr can be used to find the new IVOperand that computes the same 1826 /// expression. 
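///
/// For illustration, a hypothetical chain over loads from p[i], p[i+1] and
/// p[i+2] would have a head IVInc whose IncExpr is the SCEV of the head's own
/// IV operand, followed by two links whose IncExpr is the constant step
/// between consecutive operands.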
1827 struct IVInc {
1828 Instruction *UserInst;
1829 Value* IVOperand;
1830 const SCEV *IncExpr;
1831
1832 IVInc(Instruction *U, Value *O, const SCEV *E)
1833 : UserInst(U), IVOperand(O), IncExpr(E) {}
1834 };
1835
1836 // The list of IV increments in program order. We typically add the head of a
1837 // chain without finding subsequent links.
1838 struct IVChain {
1839 SmallVector<IVInc, 1> Incs;
1840 const SCEV *ExprBase = nullptr;
1841
1842 IVChain() = default;
1843 IVChain(const IVInc &Head, const SCEV *Base)
1844 : Incs(1, Head), ExprBase(Base) {}
1845
1846 using const_iterator = SmallVectorImpl<IVInc>::const_iterator;
1847
1848 // Return the first increment in the chain, skipping the chain head.
1849 const_iterator begin() const {
1850 assert(!Incs.empty());
1851 return std::next(Incs.begin());
1852 }
1853 const_iterator end() const {
1854 return Incs.end();
1855 }
1856
1857 // Returns true if this chain contains any increments beyond the head.
1858 bool hasIncs() const { return Incs.size() >= 2; }
1859
1860 // Add an IVInc to the end of this chain.
1861 void add(const IVInc &X) { Incs.push_back(X); }
1862
1863 // Returns the last UserInst in the chain.
1864 Instruction *tailUserInst() const { return Incs.back().UserInst; }
1865
1866 // Returns true if IncExpr can be profitably added to this chain.
1867 bool isProfitableIncrement(const SCEV *OperExpr,
1868 const SCEV *IncExpr,
1869 ScalarEvolution&);
1870 };
1871
1872 /// Helper for CollectChains to track multiple IV increment uses. Distinguish
1873 /// between FarUsers that definitely cross IV increments and NearUsers that may
1874 /// be used between IV increments.
1875 struct ChainUsers {
1876 SmallPtrSet<Instruction*, 4> FarUsers;
1877 SmallPtrSet<Instruction*, 4> NearUsers;
1878 };
1879
1880 /// This class holds state for the main loop strength reduction logic.
1881 class LSRInstance {
1882 IVUsers &IU;
1883 ScalarEvolution &SE;
1884 DominatorTree &DT;
1885 LoopInfo &LI;
1886 const TargetTransformInfo &TTI;
1887 Loop *const L;
1888 bool Changed = false;
1889
1890 /// This is the insert position at which the current loop's induction variable
1891 /// increment should be placed. In simple loops, this is the latch block's
1892 /// terminator. But in more complicated cases, this is a position which will
1893 /// dominate all the in-loop post-increment users.
1894 Instruction *IVIncInsertPos = nullptr;
1895
1896 /// Interesting factors between use strides.
1897 ///
1898 /// We explicitly use a SetVector which contains a SmallSet, instead of the
1899 /// default, a SmallDenseSet, because we need to use the full range of
1900 /// int64_ts, and there's currently no good way of doing that with
1901 /// SmallDenseSet.
1902 SetVector<int64_t, SmallVector<int64_t, 8>, SmallSet<int64_t, 8>> Factors;
1903
1904 /// Interesting use types, to facilitate truncation reuse.
1905 SmallSetVector<Type *, 4> Types;
1906
1907 /// The list of interesting uses.
1908 SmallVector<LSRUse, 16> Uses;
1909
1910 /// Track which uses use which register candidates.
1911 RegUseTracker RegUses;
1912
1913 // Limit the number of chains to avoid quadratic behavior. We don't expect to
1914 // have more than a few IV increment chains in a loop. Missing a Chain falls
1915 // back to normal LSR behavior for those uses.
1916 static const unsigned MaxChains = 8;
1917
1918 /// IV users can form a chain of IV increments.
1919 SmallVector<IVChain, MaxChains> IVChainVec;
1920
1921 /// IV users that belong to profitable IVChains.
1922 SmallPtrSet<Use*, MaxChains> IVIncSet; 1923 1924 void OptimizeShadowIV(); 1925 bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse); 1926 ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse); 1927 void OptimizeLoopTermCond(); 1928 1929 void ChainInstruction(Instruction *UserInst, Instruction *IVOper, 1930 SmallVectorImpl<ChainUsers> &ChainUsersVec); 1931 void FinalizeChain(IVChain &Chain); 1932 void CollectChains(); 1933 void GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter, 1934 SmallVectorImpl<WeakTrackingVH> &DeadInsts); 1935 1936 void CollectInterestingTypesAndFactors(); 1937 void CollectFixupsAndInitialFormulae(); 1938 1939 // Support for sharing of LSRUses between LSRFixups. 1940 using UseMapTy = DenseMap<LSRUse::SCEVUseKindPair, size_t>; 1941 UseMapTy UseMap; 1942 1943 bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg, 1944 LSRUse::KindType Kind, MemAccessTy AccessTy); 1945 1946 std::pair<size_t, int64_t> getUse(const SCEV *&Expr, LSRUse::KindType Kind, 1947 MemAccessTy AccessTy); 1948 1949 void DeleteUse(LSRUse &LU, size_t LUIdx); 1950 1951 LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU); 1952 1953 void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx); 1954 void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx); 1955 void CountRegisters(const Formula &F, size_t LUIdx); 1956 bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F); 1957 1958 void CollectLoopInvariantFixupsAndFormulae(); 1959 1960 void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base, 1961 unsigned Depth = 0); 1962 1963 void GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx, 1964 const Formula &Base, unsigned Depth, 1965 size_t Idx, bool IsScaledReg = false); 1966 void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base); 1967 void GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx, 1968 const Formula &Base, size_t Idx, 1969 bool IsScaledReg = false); 1970 void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base); 1971 void GenerateConstantOffsetsImpl(LSRUse &LU, unsigned LUIdx, 1972 const Formula &Base, 1973 const SmallVectorImpl<int64_t> &Worklist, 1974 size_t Idx, bool IsScaledReg = false); 1975 void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base); 1976 void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base); 1977 void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base); 1978 void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base); 1979 void GenerateCrossUseConstantOffsets(); 1980 void GenerateAllReuseFormulae(); 1981 1982 void FilterOutUndesirableDedicatedRegisters(); 1983 1984 size_t EstimateSearchSpaceComplexity() const; 1985 void NarrowSearchSpaceByDetectingSupersets(); 1986 void NarrowSearchSpaceByCollapsingUnrolledCode(); 1987 void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(); 1988 void NarrowSearchSpaceByFilterFormulaWithSameScaledReg(); 1989 void NarrowSearchSpaceByDeletingCostlyFormulas(); 1990 void NarrowSearchSpaceByPickingWinnerRegs(); 1991 void NarrowSearchSpaceUsingHeuristics(); 1992 1993 void SolveRecurse(SmallVectorImpl<const Formula *> &Solution, 1994 Cost &SolutionCost, 1995 SmallVectorImpl<const Formula *> &Workspace, 1996 const Cost &CurCost, 1997 const SmallPtrSet<const SCEV *, 16> &CurRegs, 1998 DenseSet<const SCEV *> &VisitedRegs) const; 1999 void Solve(SmallVectorImpl<const Formula *> &Solution) const; 2000 2001 BasicBlock::iterator 2002 
HoistInsertPosition(BasicBlock::iterator IP,
2003 const SmallVectorImpl<Instruction *> &Inputs) const;
2004 BasicBlock::iterator
2005 AdjustInsertPositionForExpand(BasicBlock::iterator IP,
2006 const LSRFixup &LF,
2007 const LSRUse &LU,
2008 SCEVExpander &Rewriter) const;
2009
2010 Value *Expand(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
2011 BasicBlock::iterator IP, SCEVExpander &Rewriter,
2012 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
2013 void RewriteForPHI(PHINode *PN, const LSRUse &LU, const LSRFixup &LF,
2014 const Formula &F, SCEVExpander &Rewriter,
2015 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
2016 void Rewrite(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
2017 SCEVExpander &Rewriter,
2018 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
2019 void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution);
2020
2021 public:
2022 LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE, DominatorTree &DT,
2023 LoopInfo &LI, const TargetTransformInfo &TTI);
2024
2025 bool getChanged() const { return Changed; }
2026
2027 void print_factors_and_types(raw_ostream &OS) const;
2028 void print_fixups(raw_ostream &OS) const;
2029 void print_uses(raw_ostream &OS) const;
2030 void print(raw_ostream &OS) const;
2031 void dump() const;
2032 };
2033
2034 } // end anonymous namespace
2035
2036 /// If IV is used in an int-to-float cast inside the loop then try to eliminate
2037 /// the cast operation.
2038 void LSRInstance::OptimizeShadowIV() {
2039 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
2040 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2041 return;
2042
2043 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
2044 UI != E; /* empty */) {
2045 IVUsers::const_iterator CandidateUI = UI;
2046 ++UI;
2047 Instruction *ShadowUse = CandidateUI->getUser();
2048 Type *DestTy = nullptr;
2049 bool IsSigned = false;
2050
2051 /* If shadow use is an int->float cast then insert a second IV
2052 to eliminate this cast.
2053
2054 for (unsigned i = 0; i < n; ++i)
2055 foo((double)i);
2056
2057 is transformed into
2058
2059 double d = 0.0;
2060 for (unsigned i = 0; i < n; ++i, ++d)
2061 foo(d);
2062 */
2063 if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser())) {
2064 IsSigned = false;
2065 DestTy = UCast->getDestTy();
2066 }
2067 else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser())) {
2068 IsSigned = true;
2069 DestTy = SCast->getDestTy();
2070 }
2071 if (!DestTy) continue;
2072
2073 // If the target does not support DestTy natively then do not apply
2074 // this transformation.
2075 if (!TTI.isTypeLegal(DestTy)) continue;
2076
2077 PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
2078 if (!PH) continue;
2079 if (PH->getNumIncomingValues() != 2) continue;
2080
2081 // If the calculation in integers overflows, the result in FP type will
2082 // differ. So we can only do this transformation if we are guaranteed not to
2083 // deal with overflowing values.
2084 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PH));
2085 if (!AR) continue;
2086 if (IsSigned && !AR->hasNoSignedWrap()) continue;
2087 if (!IsSigned && !AR->hasNoUnsignedWrap()) continue;
2088
2089 Type *SrcTy = PH->getType();
2090 int Mantissa = DestTy->getFPMantissaWidth();
2091 if (Mantissa == -1) continue;
2092 if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
2093 continue;
2094
2095 unsigned Entry, Latch;
2096 if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
2097 Entry = 0;
2098 Latch = 1;
2099 } else {
2100 Entry = 1;
2101 Latch = 0;
2102 }
2103
2104 ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
2105 if (!Init) continue;
2106 Constant *NewInit = ConstantFP::get(DestTy, IsSigned ?
2107 (double)Init->getSExtValue() :
2108 (double)Init->getZExtValue());
2109
2110 BinaryOperator *Incr =
2111 dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
2112 if (!Incr) continue;
2113 if (Incr->getOpcode() != Instruction::Add
2114 && Incr->getOpcode() != Instruction::Sub)
2115 continue;
2116
2117 /* Initialize new IV, double d = 0.0 in above example. */
2118 ConstantInt *C = nullptr;
2119 if (Incr->getOperand(0) == PH)
2120 C = dyn_cast<ConstantInt>(Incr->getOperand(1));
2121 else if (Incr->getOperand(1) == PH)
2122 C = dyn_cast<ConstantInt>(Incr->getOperand(0));
2123 else
2124 continue;
2125
2126 if (!C) continue;
2127
2128 // Ignore negative constants, as the code below doesn't handle them
2129 // correctly. TODO: Remove this restriction.
2130 if (!C->getValue().isStrictlyPositive()) continue;
2131
2132 /* Add new PHINode. */
2133 PHINode *NewPH = PHINode::Create(DestTy, 2, "IV.S.", PH);
2134
2135 /* Create new increment. '++d' in above example. */
2136 Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
2137 BinaryOperator *NewIncr =
2138 BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
2139 Instruction::FAdd : Instruction::FSub,
2140 NewPH, CFP, "IV.S.next.", Incr);
2141
2142 NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
2143 NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));
2144
2145 /* Remove cast operation */
2146 ShadowUse->replaceAllUsesWith(NewPH);
2147 ShadowUse->eraseFromParent();
2148 Changed = true;
2149 break;
2150 }
2151 }
2152
2153 /// If Cond has an operand that is an expression of an IV, set the IV user and
2154 /// stride information and return true; otherwise return false.
2155 bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) {
2156 for (IVStrideUse &U : IU)
2157 if (U.getUser() == Cond) {
2158 // NOTE: we could handle setcc instructions with multiple uses here, but
2159 // InstCombine does it as well for simple uses, and it's not clear that
2160 // this occurs often enough in real life to be worth handling.
2161 CondUse = &U;
2162 return true;
2163 }
2164 return false;
2165 }
2166
2167 /// Rewrite the loop's terminating condition if it uses a max computation.
2168 ///
2169 /// This is a narrow solution to a specific, but acute, problem. For loops
2170 /// like this:
2171 ///
2172 /// i = 0;
2173 /// do {
2174 /// p[i] = 0.0;
2175 /// } while (++i < n);
2176 ///
2177 /// the trip count isn't just 'n', because 'n' might not be positive. And
2178 /// unfortunately this can come up even for loops where the user didn't use
2179 /// a C do-while loop.
For example, seemingly well-behaved top-test loops 2180 /// will commonly be lowered like this: 2181 /// 2182 /// if (n > 0) { 2183 /// i = 0; 2184 /// do { 2185 /// p[i] = 0.0; 2186 /// } while (++i < n); 2187 /// } 2188 /// 2189 /// and then it's possible for subsequent optimization to obscure the if 2190 /// test in such a way that indvars can't find it. 2191 /// 2192 /// When indvars can't find the if test in loops like this, it creates a 2193 /// max expression, which allows it to give the loop a canonical 2194 /// induction variable: 2195 /// 2196 /// i = 0; 2197 /// max = n < 1 ? 1 : n; 2198 /// do { 2199 /// p[i] = 0.0; 2200 /// } while (++i != max); 2201 /// 2202 /// Canonical induction variables are necessary because the loop passes 2203 /// are designed around them. The most obvious example of this is the 2204 /// LoopInfo analysis, which doesn't remember trip count values. It 2205 /// expects to be able to rediscover the trip count each time it is 2206 /// needed, and it does this using a simple analysis that only succeeds if 2207 /// the loop has a canonical induction variable. 2208 /// 2209 /// However, when it comes time to generate code, the maximum operation 2210 /// can be quite costly, especially if it's inside of an outer loop. 2211 /// 2212 /// This function solves this problem by detecting this type of loop and 2213 /// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting 2214 /// the instructions for the maximum computation. 2215 ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) { 2216 // Check that the loop matches the pattern we're looking for. 2217 if (Cond->getPredicate() != CmpInst::ICMP_EQ && 2218 Cond->getPredicate() != CmpInst::ICMP_NE) 2219 return Cond; 2220 2221 SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1)); 2222 if (!Sel || !Sel->hasOneUse()) return Cond; 2223 2224 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L); 2225 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) 2226 return Cond; 2227 const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1); 2228 2229 // Add one to the backedge-taken count to get the trip count. 2230 const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount); 2231 if (IterationCount != SE.getSCEV(Sel)) return Cond; 2232 2233 // Check for a max calculation that matches the pattern. There's no check 2234 // for ICMP_ULE here because the comparison would be with zero, which 2235 // isn't interesting. 2236 CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; 2237 const SCEVNAryExpr *Max = nullptr; 2238 if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) { 2239 Pred = ICmpInst::ICMP_SLE; 2240 Max = S; 2241 } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) { 2242 Pred = ICmpInst::ICMP_SLT; 2243 Max = S; 2244 } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) { 2245 Pred = ICmpInst::ICMP_ULT; 2246 Max = U; 2247 } else { 2248 // No match; bail. 2249 return Cond; 2250 } 2251 2252 // To handle a max with more than two operands, this optimization would 2253 // require additional checking and setup. 2254 if (Max->getNumOperands() != 2) 2255 return Cond; 2256 2257 const SCEV *MaxLHS = Max->getOperand(0); 2258 const SCEV *MaxRHS = Max->getOperand(1); 2259 2260 // ScalarEvolution canonicalizes constants to the left. For < and >, look 2261 // for a comparison with 1. For <= and >=, a comparison with zero. 2262 if (!MaxLHS || 2263 (ICmpInst::isTrueWhenEqual(Pred) ? 
!MaxLHS->isZero() : (MaxLHS != One))) 2264 return Cond; 2265 2266 // Check the relevant induction variable for conformance to 2267 // the pattern. 2268 const SCEV *IV = SE.getSCEV(Cond->getOperand(0)); 2269 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV); 2270 if (!AR || !AR->isAffine() || 2271 AR->getStart() != One || 2272 AR->getStepRecurrence(SE) != One) 2273 return Cond; 2274 2275 assert(AR->getLoop() == L && 2276 "Loop condition operand is an addrec in a different loop!"); 2277 2278 // Check the right operand of the select, and remember it, as it will 2279 // be used in the new comparison instruction. 2280 Value *NewRHS = nullptr; 2281 if (ICmpInst::isTrueWhenEqual(Pred)) { 2282 // Look for n+1, and grab n. 2283 if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1))) 2284 if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1))) 2285 if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS) 2286 NewRHS = BO->getOperand(0); 2287 if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2))) 2288 if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1))) 2289 if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS) 2290 NewRHS = BO->getOperand(0); 2291 if (!NewRHS) 2292 return Cond; 2293 } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS) 2294 NewRHS = Sel->getOperand(1); 2295 else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS) 2296 NewRHS = Sel->getOperand(2); 2297 else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS)) 2298 NewRHS = SU->getValue(); 2299 else 2300 // Max doesn't match expected pattern. 2301 return Cond; 2302 2303 // Determine the new comparison opcode. It may be signed or unsigned, 2304 // and the original comparison may be either equality or inequality. 2305 if (Cond->getPredicate() == CmpInst::ICMP_EQ) 2306 Pred = CmpInst::getInversePredicate(Pred); 2307 2308 // Ok, everything looks ok to change the condition into an SLT or SGE and 2309 // delete the max calculation. 2310 ICmpInst *NewCond = 2311 new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp"); 2312 2313 // Delete the max calculation instructions. 2314 Cond->replaceAllUsesWith(NewCond); 2315 CondUse->setUser(NewCond); 2316 Instruction *Cmp = cast<Instruction>(Sel->getOperand(0)); 2317 Cond->eraseFromParent(); 2318 Sel->eraseFromParent(); 2319 if (Cmp->use_empty()) 2320 Cmp->eraseFromParent(); 2321 return NewCond; 2322 } 2323 2324 /// Change loop terminating condition to use the postinc iv when possible. 2325 void 2326 LSRInstance::OptimizeLoopTermCond() { 2327 SmallPtrSet<Instruction *, 4> PostIncs; 2328 2329 // We need a different set of heuristics for rotated and non-rotated loops. 2330 // If a loop is rotated then the latch is also the backedge, so inserting 2331 // post-inc expressions just before the latch is ideal. To reduce live ranges 2332 // it also makes sense to rewrite terminating conditions to use post-inc 2333 // expressions. 2334 // 2335 // If the loop is not rotated then the latch is not a backedge; the latch 2336 // check is done in the loop head. Adding post-inc expressions before the 2337 // latch will cause overlapping live-ranges of pre-inc and post-inc expressions 2338 // in the loop body. In this case we do *not* want to use post-inc expressions 2339 // in the latch check, and we want to insert post-inc expressions before 2340 // the backedge. 
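// An illustrative sketch of the two shapes (hypothetical IR):
//
//   Rotated (latch is exiting):        Non-rotated (head-tested):
//     latch:                             header:
//       %c = icmp ult %i.next, %n          %c = icmp ult %i, %n
//       br %c, %header, %exit              br %c, %body, %exit
//                                        latch:
//                                          br %header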
2341 BasicBlock *LatchBlock = L->getLoopLatch(); 2342 SmallVector<BasicBlock*, 8> ExitingBlocks; 2343 L->getExitingBlocks(ExitingBlocks); 2344 if (llvm::all_of(ExitingBlocks, [&LatchBlock](const BasicBlock *BB) { 2345 return LatchBlock != BB; 2346 })) { 2347 // The backedge doesn't exit the loop; treat this as a head-tested loop. 2348 IVIncInsertPos = LatchBlock->getTerminator(); 2349 return; 2350 } 2351 2352 // Otherwise treat this as a rotated loop. 2353 for (BasicBlock *ExitingBlock : ExitingBlocks) { 2354 // Get the terminating condition for the loop if possible. If we 2355 // can, we want to change it to use a post-incremented version of its 2356 // induction variable, to allow coalescing the live ranges for the IV into 2357 // one register value. 2358 2359 BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator()); 2360 if (!TermBr) 2361 continue; 2362 // FIXME: Overly conservative, termination condition could be an 'or' etc.. 2363 if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition())) 2364 continue; 2365 2366 // Search IVUsesByStride to find Cond's IVUse if there is one. 2367 IVStrideUse *CondUse = nullptr; 2368 ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition()); 2369 if (!FindIVUserForCond(Cond, CondUse)) 2370 continue; 2371 2372 // If the trip count is computed in terms of a max (due to ScalarEvolution 2373 // being unable to find a sufficient guard, for example), change the loop 2374 // comparison to use SLT or ULT instead of NE. 2375 // One consequence of doing this now is that it disrupts the count-down 2376 // optimization. That's not always a bad thing though, because in such 2377 // cases it may still be worthwhile to avoid a max. 2378 Cond = OptimizeMax(Cond, CondUse); 2379 2380 // If this exiting block dominates the latch block, it may also use 2381 // the post-inc value if it won't be shared with other uses. 2382 // Check for dominance. 2383 if (!DT.dominates(ExitingBlock, LatchBlock)) 2384 continue; 2385 2386 // Conservatively avoid trying to use the post-inc value in non-latch 2387 // exits if there may be pre-inc users in intervening blocks. 2388 if (LatchBlock != ExitingBlock) 2389 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) 2390 // Test if the use is reachable from the exiting block. This dominator 2391 // query is a conservative approximation of reachability. 2392 if (&*UI != CondUse && 2393 !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) { 2394 // Conservatively assume there may be reuse if the quotient of their 2395 // strides could be a legal scale. 2396 const SCEV *A = IU.getStride(*CondUse, L); 2397 const SCEV *B = IU.getStride(*UI, L); 2398 if (!A || !B) continue; 2399 if (SE.getTypeSizeInBits(A->getType()) != 2400 SE.getTypeSizeInBits(B->getType())) { 2401 if (SE.getTypeSizeInBits(A->getType()) > 2402 SE.getTypeSizeInBits(B->getType())) 2403 B = SE.getSignExtendExpr(B, A->getType()); 2404 else 2405 A = SE.getSignExtendExpr(A, B->getType()); 2406 } 2407 if (const SCEVConstant *D = 2408 dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) { 2409 const ConstantInt *C = D->getValue(); 2410 // Stride of one or negative one can have reuse with non-addresses. 2411 if (C->isOne() || C->isMinusOne()) 2412 goto decline_post_inc; 2413 // Avoid weird situations. 2414 if (C->getValue().getMinSignedBits() >= 64 || 2415 C->getValue().isMinSignedValue()) 2416 goto decline_post_inc; 2417 // Check for possible scaled-address reuse. 
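// For illustration: if the condition's IV strides by 4 and another use
// strides by 16, the quotient 4 may be usable as a legal address scale
// (base + 4*iv) for that other use; keeping the pre-inc value around
// preserves that reuse, so we decline the post-inc form below.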
2418 if (isAddressUse(TTI, UI->getUser(), UI->getOperandValToReplace())) { 2419 MemAccessTy AccessTy = getAccessType( 2420 TTI, UI->getUser(), UI->getOperandValToReplace()); 2421 int64_t Scale = C->getSExtValue(); 2422 if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr, 2423 /*BaseOffset=*/0, 2424 /*HasBaseReg=*/false, Scale, 2425 AccessTy.AddrSpace)) 2426 goto decline_post_inc; 2427 Scale = -Scale; 2428 if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr, 2429 /*BaseOffset=*/0, 2430 /*HasBaseReg=*/false, Scale, 2431 AccessTy.AddrSpace)) 2432 goto decline_post_inc; 2433 } 2434 } 2435 } 2436 2437 LLVM_DEBUG(dbgs() << " Change loop exiting icmp to use postinc iv: " 2438 << *Cond << '\n'); 2439 2440 // It's possible for the setcc instruction to be anywhere in the loop, and 2441 // possible for it to have multiple users. If it is not immediately before 2442 // the exiting block branch, move it. 2443 if (&*++BasicBlock::iterator(Cond) != TermBr) { 2444 if (Cond->hasOneUse()) { 2445 Cond->moveBefore(TermBr); 2446 } else { 2447 // Clone the terminating condition and insert into the loopend. 2448 ICmpInst *OldCond = Cond; 2449 Cond = cast<ICmpInst>(Cond->clone()); 2450 Cond->setName(L->getHeader()->getName() + ".termcond"); 2451 ExitingBlock->getInstList().insert(TermBr->getIterator(), Cond); 2452 2453 // Clone the IVUse, as the old use still exists! 2454 CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace()); 2455 TermBr->replaceUsesOfWith(OldCond, Cond); 2456 } 2457 } 2458 2459 // If we get to here, we know that we can transform the setcc instruction to 2460 // use the post-incremented version of the IV, allowing us to coalesce the 2461 // live ranges for the IV correctly. 2462 CondUse->transformToPostInc(L); 2463 Changed = true; 2464 2465 PostIncs.insert(Cond); 2466 decline_post_inc:; 2467 } 2468 2469 // Determine an insertion point for the loop induction variable increment. It 2470 // must dominate all the post-inc comparisons we just set up, and it must 2471 // dominate the loop latch edge. 2472 IVIncInsertPos = L->getLoopLatch()->getTerminator(); 2473 for (Instruction *Inst : PostIncs) { 2474 BasicBlock *BB = 2475 DT.findNearestCommonDominator(IVIncInsertPos->getParent(), 2476 Inst->getParent()); 2477 if (BB == Inst->getParent()) 2478 IVIncInsertPos = Inst; 2479 else if (BB != IVIncInsertPos->getParent()) 2480 IVIncInsertPos = BB->getTerminator(); 2481 } 2482 } 2483 2484 /// Determine if the given use can accommodate a fixup at the given offset and 2485 /// other details. If so, update the use and return true. 2486 bool LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, 2487 bool HasBaseReg, LSRUse::KindType Kind, 2488 MemAccessTy AccessTy) { 2489 int64_t NewMinOffset = LU.MinOffset; 2490 int64_t NewMaxOffset = LU.MaxOffset; 2491 MemAccessTy NewAccessTy = AccessTy; 2492 2493 // Check for a mismatched kind. It's tempting to collapse mismatched kinds to 2494 // something conservative, however this can pessimize in the case that one of 2495 // the uses will have all its uses outside the loop, for example. 2496 if (LU.Kind != Kind) 2497 return false; 2498 2499 // Check for a mismatched access type, and fall back conservatively as needed. 2500 // TODO: Be less conservative when the type is similar and can use the same 2501 // addressing modes. 
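// For illustration: if one fixup accesses an i64 and another a double at
// the same addresses, the mismatched memory types fall back to an unknown
// access type below, so only addressing modes legal for both are considered.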
2502 if (Kind == LSRUse::Address) {
2503 if (AccessTy.MemTy != LU.AccessTy.MemTy) {
2504 NewAccessTy = MemAccessTy::getUnknown(AccessTy.MemTy->getContext(),
2505 AccessTy.AddrSpace);
2506 }
2507 }
2508
2509 // Conservatively assume HasBaseReg is true for now.
2510 if (NewOffset < LU.MinOffset) {
2511 if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
2512 LU.MaxOffset - NewOffset, HasBaseReg))
2513 return false;
2514 NewMinOffset = NewOffset;
2515 } else if (NewOffset > LU.MaxOffset) {
2516 if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
2517 NewOffset - LU.MinOffset, HasBaseReg))
2518 return false;
2519 NewMaxOffset = NewOffset;
2520 }
2521
2522 // Update the use.
2523 LU.MinOffset = NewMinOffset;
2524 LU.MaxOffset = NewMaxOffset;
2525 LU.AccessTy = NewAccessTy;
2526 return true;
2527 }
2528
2529 /// Return an LSRUse index and an offset value for a fixup which needs the given
2530 /// expression, with the given kind and optional access type. Either reuse an
2531 /// existing use or create a new one, as needed.
2532 std::pair<size_t, int64_t> LSRInstance::getUse(const SCEV *&Expr,
2533 LSRUse::KindType Kind,
2534 MemAccessTy AccessTy) {
2535 const SCEV *Copy = Expr;
2536 int64_t Offset = ExtractImmediate(Expr, SE);
2537
2538 // Basic uses can't accept any offset, for example.
2539 if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr,
2540 Offset, /*HasBaseReg=*/ true)) {
2541 Expr = Copy;
2542 Offset = 0;
2543 }
2544
2545 std::pair<UseMapTy::iterator, bool> P =
2546 UseMap.insert(std::make_pair(LSRUse::SCEVUseKindPair(Expr, Kind), 0));
2547 if (!P.second) {
2548 // A use already existed with this base.
2549 size_t LUIdx = P.first->second;
2550 LSRUse &LU = Uses[LUIdx];
2551 if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy))
2552 // Reuse this use.
2553 return std::make_pair(LUIdx, Offset);
2554 }
2555
2556 // Create a new use.
2557 size_t LUIdx = Uses.size();
2558 P.first->second = LUIdx;
2559 Uses.push_back(LSRUse(Kind, AccessTy));
2560 LSRUse &LU = Uses[LUIdx];
2561
2562 LU.MinOffset = Offset;
2563 LU.MaxOffset = Offset;
2564 return std::make_pair(LUIdx, Offset);
2565 }
2566
2567 /// Delete the given use from the Uses list.
2568 void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) {
2569 if (&LU != &Uses.back())
2570 std::swap(LU, Uses.back());
2571 Uses.pop_back();
2572
2573 // Update RegUses.
2574 RegUses.swapAndDropUse(LUIdx, Uses.size());
2575 }
2576
2577 /// Look for a use distinct from OrigLU that has a formula with the same
2578 /// registers as the given formula.
2579 LSRUse *
2580 LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
2581 const LSRUse &OrigLU) {
2582 // Search all uses for the formula. This could be more clever.
2583 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
2584 LSRUse &LU = Uses[LUIdx];
2585 // Check whether this use is close enough to OrigLU, to see whether it's
2586 // worthwhile looking through its formulae.
2587 // Ignore ICmpZero uses because they may contain formulae generated by
2588 // GenerateICmpZeroScales, in which case adding fixup offsets may
2589 // be invalid.
2590 if (&LU != &OrigLU &&
2591 LU.Kind != LSRUse::ICmpZero &&
2592 LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy &&
2593 LU.WidestFixupType == OrigLU.WidestFixupType &&
2594 LU.HasFormulaWithSameRegs(OrigF)) {
2595 // Scan through this use's formulae.
2596 for (const Formula &F : LU.Formulae) { 2597 // Check to see if this formula has the same registers and symbols 2598 // as OrigF. 2599 if (F.BaseRegs == OrigF.BaseRegs && 2600 F.ScaledReg == OrigF.ScaledReg && 2601 F.BaseGV == OrigF.BaseGV && 2602 F.Scale == OrigF.Scale && 2603 F.UnfoldedOffset == OrigF.UnfoldedOffset) { 2604 if (F.BaseOffset == 0) 2605 return &LU; 2606 // This is the formula where all the registers and symbols matched; 2607 // there aren't going to be any others. Since we declined it, we 2608 // can skip the rest of the formulae and proceed to the next LSRUse. 2609 break; 2610 } 2611 } 2612 } 2613 } 2614 2615 // Nothing looked good. 2616 return nullptr; 2617 } 2618 2619 void LSRInstance::CollectInterestingTypesAndFactors() { 2620 SmallSetVector<const SCEV *, 4> Strides; 2621 2622 // Collect interesting types and strides. 2623 SmallVector<const SCEV *, 4> Worklist; 2624 for (const IVStrideUse &U : IU) { 2625 const SCEV *Expr = IU.getExpr(U); 2626 2627 // Collect interesting types. 2628 Types.insert(SE.getEffectiveSCEVType(Expr->getType())); 2629 2630 // Add strides for mentioned loops. 2631 Worklist.push_back(Expr); 2632 do { 2633 const SCEV *S = Worklist.pop_back_val(); 2634 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { 2635 if (AR->getLoop() == L) 2636 Strides.insert(AR->getStepRecurrence(SE)); 2637 Worklist.push_back(AR->getStart()); 2638 } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 2639 Worklist.append(Add->op_begin(), Add->op_end()); 2640 } 2641 } while (!Worklist.empty()); 2642 } 2643 2644 // Compute interesting factors from the set of interesting strides. 2645 for (SmallSetVector<const SCEV *, 4>::const_iterator 2646 I = Strides.begin(), E = Strides.end(); I != E; ++I) 2647 for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter = 2648 std::next(I); NewStrideIter != E; ++NewStrideIter) { 2649 const SCEV *OldStride = *I; 2650 const SCEV *NewStride = *NewStrideIter; 2651 2652 if (SE.getTypeSizeInBits(OldStride->getType()) != 2653 SE.getTypeSizeInBits(NewStride->getType())) { 2654 if (SE.getTypeSizeInBits(OldStride->getType()) > 2655 SE.getTypeSizeInBits(NewStride->getType())) 2656 NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType()); 2657 else 2658 OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType()); 2659 } 2660 if (const SCEVConstant *Factor = 2661 dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride, 2662 SE, true))) { 2663 if (Factor->getAPInt().getMinSignedBits() <= 64) 2664 Factors.insert(Factor->getAPInt().getSExtValue()); 2665 } else if (const SCEVConstant *Factor = 2666 dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride, 2667 NewStride, 2668 SE, true))) { 2669 if (Factor->getAPInt().getMinSignedBits() <= 64) 2670 Factors.insert(Factor->getAPInt().getSExtValue()); 2671 } 2672 } 2673 2674 // If all uses use the same type, don't bother looking for truncation-based 2675 // reuse. 2676 if (Types.size() == 1) 2677 Types.clear(); 2678 2679 LLVM_DEBUG(print_factors_and_types(dbgs())); 2680 } 2681 2682 /// Helper for CollectChains that finds an IV operand (computed by an AddRec in 2683 /// this loop) within [OI,OE) or returns OE. If IVUsers mapped Instructions to 2684 /// IVStrideUses, we could partially skip this. 
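/// For illustration: given a hypothetical user "%a = add i64 %iv, %n" where
/// %iv is an AddRec of this loop and %n is loop-invariant, the search below
/// stops at the %iv operand and returns its position.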
2685 static User::op_iterator
2686 findIVOperand(User::op_iterator OI, User::op_iterator OE,
2687 Loop *L, ScalarEvolution &SE) {
2688 for (; OI != OE; ++OI) {
2689 if (Instruction *Oper = dyn_cast<Instruction>(*OI)) {
2690 if (!SE.isSCEVable(Oper->getType()))
2691 continue;
2692
2693 if (const SCEVAddRecExpr *AR =
2694 dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Oper))) {
2695 if (AR->getLoop() == L)
2696 break;
2697 }
2698 }
2699 }
2700 return OI;
2701 }
2702
2703 /// IVChain logic must consistently peek base TruncInst operands, so wrap it in
2704 /// a convenient helper.
2705 static Value *getWideOperand(Value *Oper) {
2706 if (TruncInst *Trunc = dyn_cast<TruncInst>(Oper))
2707 return Trunc->getOperand(0);
2708 return Oper;
2709 }
2710
2711 /// Return true if we allow an IV chain to include both types.
2712 static bool isCompatibleIVType(Value *LVal, Value *RVal) {
2713 Type *LType = LVal->getType();
2714 Type *RType = RVal->getType();
2715 return (LType == RType) || (LType->isPointerTy() && RType->isPointerTy() &&
2716 // Different address spaces can mean different
2717 // pointer implementations, e.g. i16 vs i32,
2718 // so disallow that.
2719 (LType->getPointerAddressSpace() ==
2720 RType->getPointerAddressSpace()));
2721 }
2722
2723 /// Return an approximation of this SCEV expression's "base", or NULL for any
2724 /// constant. Returning the expression itself is conservative. Returning a
2725 /// deeper subexpression is more precise and valid as long as it isn't less
2726 /// complex than another subexpression. For expressions involving multiple
2727 /// unscaled values, we need to return the pointer-type SCEVUnknown. This avoids
2728 /// forming chains across objects, such as: PrevOper==a[i], IVOper==b[i],
2729 /// IVInc==b-a.
2730 ///
2731 /// Since SCEVUnknown is the rightmost type, and pointers are the rightmost
2732 /// SCEVUnknown, we simply return the rightmost SCEV operand.
2733 static const SCEV *getExprBase(const SCEV *S) {
2734 switch (S->getSCEVType()) {
2735 default: // including scUnknown.
2736 return S;
2737 case scConstant:
2738 return nullptr;
2739 case scTruncate:
2740 return getExprBase(cast<SCEVTruncateExpr>(S)->getOperand());
2741 case scZeroExtend:
2742 return getExprBase(cast<SCEVZeroExtendExpr>(S)->getOperand());
2743 case scSignExtend:
2744 return getExprBase(cast<SCEVSignExtendExpr>(S)->getOperand());
2745 case scAddExpr: {
2746 // Skip over scaled operands (scMulExpr) to follow add operands as long as
2747 // there's nothing more complex.
2748 // FIXME: not sure if we want to recognize negation.
2749 const SCEVAddExpr *Add = cast<SCEVAddExpr>(S);
2750 for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(Add->op_end()),
2751 E(Add->op_begin()); I != E; ++I) {
2752 const SCEV *SubExpr = *I;
2753 if (SubExpr->getSCEVType() == scAddExpr)
2754 return getExprBase(SubExpr);
2755
2756 if (SubExpr->getSCEVType() != scMulExpr)
2757 return SubExpr;
2758 }
2759 return S; // all operands are scaled, be conservative.
2760 }
2761 case scAddRecExpr:
2762 return getExprBase(cast<SCEVAddRecExpr>(S)->getStart());
2763 }
2764 }
2765
2766 /// Return true if the chain increment is profitable to expand into a loop
2767 /// invariant value, which may require its own register. A profitable chain
2768 /// increment will be an offset relative to the same base. We allow such offsets
2769 /// to potentially be used as chain increments as long as they are not obviously
2770 /// expensive to expand using real instructions.
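///
/// For illustration, with a hypothetical chain head a[i]: a nonconstant
/// increment is rejected when the new operand is really just a constant
/// offset from the head (e.g. a[i] + 4 reached via a nonconstant step);
/// otherwise it is accepted only if the increment expression is not deemed
/// a high-cost expansion.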
2771 bool IVChain::isProfitableIncrement(const SCEV *OperExpr,
2772 const SCEV *IncExpr,
2773 ScalarEvolution &SE) {
2774 // Aggressively form chains when -stress-ivchain.
2775 if (StressIVChain)
2776 return true;
2777
2778 // Do not replace a constant offset from IV head with a nonconstant IV
2779 // increment.
2780 if (!isa<SCEVConstant>(IncExpr)) {
2781 const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Incs[0].IVOperand));
2782 if (isa<SCEVConstant>(SE.getMinusSCEV(OperExpr, HeadExpr)))
2783 return false;
2784 }
2785
2786 SmallPtrSet<const SCEV*, 8> Processed;
2787 return !isHighCostExpansion(IncExpr, Processed, SE);
2788 }
2789
2790 /// Return true if the number of registers needed for the chain is estimated to
2791 /// be less than the number required for the individual IV users. First prohibit
2792 /// any IV users that keep the IV live across increments (the Users set should
2793 /// be empty). Next count the number and type of increments in the chain.
2794 ///
2795 /// Chaining IVs can lead to considerable code bloat if ISEL doesn't
2796 /// effectively use postinc addressing modes. Only consider it profitable if the
2797 /// increments can be computed in fewer registers when chained.
2798 ///
2799 /// TODO: Consider IVInc free if it's already used in other chains.
2800 static bool
2801 isProfitableChain(IVChain &Chain, SmallPtrSetImpl<Instruction*> &Users,
2802 ScalarEvolution &SE, const TargetTransformInfo &TTI) {
2803 if (StressIVChain)
2804 return true;
2805
2806 if (!Chain.hasIncs())
2807 return false;
2808
2809 if (!Users.empty()) {
2810 LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n";
2811 for (Instruction *Inst
2812 : Users) { dbgs() << " " << *Inst << "\n"; });
2813 return false;
2814 }
2815 assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
2816
2817 // The chain itself may require a register, so initialize cost to 1.
2818 int cost = 1;
2819
2820 // A complete chain likely eliminates the need for keeping the original IV in
2821 // a register. LSR does not currently know how to form a complete chain unless
2822 // the header phi already exists.
2823 if (isa<PHINode>(Chain.tailUserInst())
2824 && SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) {
2825 --cost;
2826 }
2827 const SCEV *LastIncExpr = nullptr;
2828 unsigned NumConstIncrements = 0;
2829 unsigned NumVarIncrements = 0;
2830 unsigned NumReusedIncrements = 0;
2831 for (const IVInc &Inc : Chain) {
2832 if (Inc.IncExpr->isZero())
2833 continue;
2834
2835 // Incrementing by zero or some constant is neutral. We assume constants can
2836 // be folded into an addressing mode or an add's immediate operand.
2837 if (isa<SCEVConstant>(Inc.IncExpr)) {
2838 ++NumConstIncrements;
2839 continue;
2840 }
2841
2842 if (Inc.IncExpr == LastIncExpr)
2843 ++NumReusedIncrements;
2844 else
2845 ++NumVarIncrements;
2846
2847 LastIncExpr = Inc.IncExpr;
2848 }
2849 // An IV chain with a single increment is handled by LSR's postinc
2850 // uses. However, a chain with multiple increments requires keeping the IV's
2851 // value live longer than it needs to be if chained.
2852 if (NumConstIncrements > 1)
2853 --cost;
2854
2855 // Materializing increment expressions in the preheader that didn't exist in
2856 // the original code may cost a register.
For example, sign-extended array 2857 // indices can produce ridiculous increments like this: 2858 // IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64))) 2859 cost += NumVarIncrements; 2860 2861 // Reusing variable increments likely saves a register to hold the multiple of 2862 // the stride. 2863 cost -= NumReusedIncrements; 2864 2865 LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost 2866 << "\n"); 2867 2868 return cost < 0; 2869 } 2870 2871 /// Add this IV user to an existing chain or make it the head of a new chain. 2872 void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper, 2873 SmallVectorImpl<ChainUsers> &ChainUsersVec) { 2874 // When IVs are used as types of varying widths, they are generally converted 2875 // to a wider type with some uses remaining narrow under a (free) trunc. 2876 Value *const NextIV = getWideOperand(IVOper); 2877 const SCEV *const OperExpr = SE.getSCEV(NextIV); 2878 const SCEV *const OperExprBase = getExprBase(OperExpr); 2879 2880 // Visit all existing chains. Check if its IVOper can be computed as a 2881 // profitable loop invariant increment from the last link in the Chain. 2882 unsigned ChainIdx = 0, NChains = IVChainVec.size(); 2883 const SCEV *LastIncExpr = nullptr; 2884 for (; ChainIdx < NChains; ++ChainIdx) { 2885 IVChain &Chain = IVChainVec[ChainIdx]; 2886 2887 // Prune the solution space aggressively by checking that both IV operands 2888 // are expressions that operate on the same unscaled SCEVUnknown. This 2889 // "base" will be canceled by the subsequent getMinusSCEV call. Checking 2890 // first avoids creating extra SCEV expressions. 2891 if (!StressIVChain && Chain.ExprBase != OperExprBase) 2892 continue; 2893 2894 Value *PrevIV = getWideOperand(Chain.Incs.back().IVOperand); 2895 if (!isCompatibleIVType(PrevIV, NextIV)) 2896 continue; 2897 2898 // A phi node terminates a chain. 2899 if (isa<PHINode>(UserInst) && isa<PHINode>(Chain.tailUserInst())) 2900 continue; 2901 2902 // The increment must be loop-invariant so it can be kept in a register. 2903 const SCEV *PrevExpr = SE.getSCEV(PrevIV); 2904 const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr); 2905 if (!SE.isLoopInvariant(IncExpr, L)) 2906 continue; 2907 2908 if (Chain.isProfitableIncrement(OperExpr, IncExpr, SE)) { 2909 LastIncExpr = IncExpr; 2910 break; 2911 } 2912 } 2913 // If we haven't found a chain, create a new one, unless we hit the max. Don't 2914 // bother for phi nodes, because they must be last in the chain. 2915 if (ChainIdx == NChains) { 2916 if (isa<PHINode>(UserInst)) 2917 return; 2918 if (NChains >= MaxChains && !StressIVChain) { 2919 LLVM_DEBUG(dbgs() << "IV Chain Limit\n"); 2920 return; 2921 } 2922 LastIncExpr = OperExpr; 2923 // IVUsers may have skipped over sign/zero extensions. We don't currently 2924 // attempt to form chains involving extensions unless they can be hoisted 2925 // into this loop's AddRec. 2926 if (!isa<SCEVAddRecExpr>(LastIncExpr)) 2927 return; 2928 ++NChains; 2929 IVChainVec.push_back(IVChain(IVInc(UserInst, IVOper, LastIncExpr), 2930 OperExprBase)); 2931 ChainUsersVec.resize(NChains); 2932 LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst 2933 << ") IV=" << *LastIncExpr << "\n"); 2934 } else { 2935 LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Inc: (" << *UserInst 2936 << ") IV+" << *LastIncExpr << "\n"); 2937 // Add this IV user to the end of the chain. 
2938 IVChainVec[ChainIdx].add(IVInc(UserInst, IVOper, LastIncExpr));
2939 }
2940 IVChain &Chain = IVChainVec[ChainIdx];
2941
2942 SmallPtrSet<Instruction*,4> &NearUsers = ChainUsersVec[ChainIdx].NearUsers;
2943 // This chain's NearUsers become FarUsers.
2944 if (!LastIncExpr->isZero()) {
2945 ChainUsersVec[ChainIdx].FarUsers.insert(NearUsers.begin(),
2946 NearUsers.end());
2947 NearUsers.clear();
2948 }
2949
2950 // All other uses of IVOperand become near uses of the chain.
2951 // We currently ignore intermediate values within SCEV expressions, assuming
2952 // they will eventually be used by the current chain, or can be computed
2953 // from one of the chain increments. To be more precise we could
2954 // transitively follow its users and only add leaf IV users to the set.
2955 for (User *U : IVOper->users()) {
2956 Instruction *OtherUse = dyn_cast<Instruction>(U);
2957 if (!OtherUse)
2958 continue;
2959 // Uses in the chain will no longer be uses if the chain is formed.
2960 // Include the head of the chain in this iteration (not Chain.begin()).
2961 IVChain::const_iterator IncIter = Chain.Incs.begin();
2962 IVChain::const_iterator IncEnd = Chain.Incs.end();
2963 for (; IncIter != IncEnd; ++IncIter) {
2964 if (IncIter->UserInst == OtherUse)
2965 break;
2966 }
2967 if (IncIter != IncEnd)
2968 continue;
2969
2970 if (SE.isSCEVable(OtherUse->getType())
2971 && !isa<SCEVUnknown>(SE.getSCEV(OtherUse))
2972 && IU.isIVUserOrOperand(OtherUse)) {
2973 continue;
2974 }
2975 NearUsers.insert(OtherUse);
2976 }
2977
2978 // Since this user is part of the chain, it's no longer considered a use
2979 // of the chain.
2980 ChainUsersVec[ChainIdx].FarUsers.erase(UserInst);
2981 }
2982
2983 /// Populate the vector of Chains.
2984 ///
2985 /// This decreases ILP at the architecture level. Targets with ample registers,
2986 /// multiple memory ports, and no register renaming probably don't want
2987 /// this. However, such targets should probably disable LSR altogether.
2988 ///
2989 /// The job of LSR is to make a reasonable choice of induction variables across
2990 /// the loop. Subsequent passes can easily "unchain" computation exposing more
2991 /// ILP *within the loop* if the target wants it.
2992 ///
2993 /// Finding the best IV chain is potentially a scheduling problem. Since LSR
2994 /// will not reorder memory operations, it will recognize the following as a
2995 /// chain, but will generate redundant IV increments. Ideally this would be
2996 /// corrected later by a smart scheduler:
2997 /// = A[i]
2998 /// = A[i+x]
2999 /// A[i] =
3000 /// A[i+x] =
3001 ///
3002 /// TODO: Walk the entire domtree within this loop, not just the path to the
3003 /// loop latch. This will discover chains on side paths, but requires
3004 /// maintaining multiple copies of the Chains state.
3005 void LSRInstance::CollectChains() {
3006 LLVM_DEBUG(dbgs() << "Collecting IV Chains.\n");
3007 SmallVector<ChainUsers, 8> ChainUsersVec;
3008
3009 SmallVector<BasicBlock *,8> LatchPath;
3010 BasicBlock *LoopHeader = L->getHeader();
3011 for (DomTreeNode *Rung = DT.getNode(L->getLoopLatch());
3012 Rung->getBlock() != LoopHeader; Rung = Rung->getIDom()) {
3013 LatchPath.push_back(Rung->getBlock());
3014 }
3015 LatchPath.push_back(LoopHeader);
3016
3017 // Walk the instruction stream from the loop header to the loop latch.
3018 for (BasicBlock *BB : reverse(LatchPath)) {
3019 for (Instruction &I : *BB) {
3020 // Skip instructions that weren't seen by IVUsers analysis.
3021 if (isa<PHINode>(I) || !IU.isIVUserOrOperand(&I))
3022 continue;
3023
3024 // Ignore users that are part of a SCEV expression. This way we only
3025 // consider leaf IV Users. This effectively rediscovers a portion of
3026 // IVUsers analysis but in program order this time.
3027 if (SE.isSCEVable(I.getType()) && !isa<SCEVUnknown>(SE.getSCEV(&I)))
3028 continue;
3029
3030 // Remove this instruction from any NearUsers set it may be in.
3031 for (unsigned ChainIdx = 0, NChains = IVChainVec.size();
3032 ChainIdx < NChains; ++ChainIdx) {
3033 ChainUsersVec[ChainIdx].NearUsers.erase(&I);
3034 }
3035 // Search for operands that can be chained.
3036 SmallPtrSet<Instruction*, 4> UniqueOperands;
3037 User::op_iterator IVOpEnd = I.op_end();
3038 User::op_iterator IVOpIter = findIVOperand(I.op_begin(), IVOpEnd, L, SE);
3039 while (IVOpIter != IVOpEnd) {
3040 Instruction *IVOpInst = cast<Instruction>(*IVOpIter);
3041 if (UniqueOperands.insert(IVOpInst).second)
3042 ChainInstruction(&I, IVOpInst, ChainUsersVec);
3043 IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
3044 }
3045 } // Continue walking down the instructions.
3046 } // Continue walking down the domtree.
3047 // Visit phi backedges to determine if the chain can generate the IV postinc.
3048 for (PHINode &PN : L->getHeader()->phis()) {
3049 if (!SE.isSCEVable(PN.getType()))
3050 continue;
3051
3052 Instruction *IncV =
3053 dyn_cast<Instruction>(PN.getIncomingValueForBlock(L->getLoopLatch()));
3054 if (IncV)
3055 ChainInstruction(&PN, IncV, ChainUsersVec);
3056 }
3057 // Remove any unprofitable chains.
3058 unsigned ChainIdx = 0;
3059 for (unsigned UsersIdx = 0, NChains = IVChainVec.size();
3060 UsersIdx < NChains; ++UsersIdx) {
3061 if (!isProfitableChain(IVChainVec[UsersIdx],
3062 ChainUsersVec[UsersIdx].FarUsers, SE, TTI))
3063 continue;
3064 // Preserve the chain at UsersIdx.
3065 if (ChainIdx != UsersIdx)
3066 IVChainVec[ChainIdx] = IVChainVec[UsersIdx];
3067 FinalizeChain(IVChainVec[ChainIdx]);
3068 ++ChainIdx;
3069 }
3070 IVChainVec.resize(ChainIdx);
3071 }
3072
3073 void LSRInstance::FinalizeChain(IVChain &Chain) {
3074 assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
3075 LLVM_DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n");
3076
3077 for (const IVInc &Inc : Chain) {
3078 LLVM_DEBUG(dbgs() << " Inc: " << *Inc.UserInst << "\n");
3079 auto UseI = find(Inc.UserInst->operands(), Inc.IVOperand);
3080 assert(UseI != Inc.UserInst->op_end() && "cannot find IV operand");
3081 IVIncSet.insert(UseI);
3082 }
3083 }
3084
3085 /// Return true if the IVInc can be folded into an addressing mode.
3086 static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
3087 Value *Operand, const TargetTransformInfo &TTI) {
3088 const SCEVConstant *IncConst = dyn_cast<SCEVConstant>(IncExpr);
3089 if (!IncConst || !isAddressUse(TTI, UserInst, Operand))
3090 return false;
3091
3092 if (IncConst->getAPInt().getMinSignedBits() > 64)
3093 return false;
3094
3095 MemAccessTy AccessTy = getAccessType(TTI, UserInst, Operand);
3096 int64_t IncOffset = IncConst->getValue()->getSExtValue();
3097 if (!isAlwaysFoldable(TTI, LSRUse::Address, AccessTy, /*BaseGV=*/nullptr,
3098 IncOffset, /*HasBaseReg=*/false))
3099 return false;
3100
3101 return true;
3102 }
3103
3104 /// Generate an add or subtract for each IVInc in a chain to materialize the IV
3105 /// user's operand from the previous IV user's operand.
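///
/// For illustration, a hypothetical chain of accesses at %iv, %iv+4, %iv+8
/// would be materialized roughly as:
///   %iv1 = add i64 %iv, 4   ; operand for the second user
///   %iv2 = add i64 %iv1, 4  ; operand for the third user
/// so each user's operand is one add away from the previous one.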
void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
                                  SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
  // Find the new IVOperand for the head of the chain. It may have been
  // replaced by LSR.
  const IVInc &Head = Chain.Incs[0];
  User::op_iterator IVOpEnd = Head.UserInst->op_end();
  // findIVOperand returns IVOpEnd if it can no longer find a valid IV user.
  User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(),
                                             IVOpEnd, L, SE);
  Value *IVSrc = nullptr;
  while (IVOpIter != IVOpEnd) {
    IVSrc = getWideOperand(*IVOpIter);

    // If this operand computes the expression that the chain needs, we may use
    // it. (Check this after setting IVSrc which is used below.)
    //
    // Note that if Head.IncExpr is wider than IVSrc, then this phi is too
    // narrow for the chain, so we can no longer use it. We do allow using a
    // wider phi, assuming LSR checked for free truncation. In that case we
    // should already have a truncate on this operand such that
    // getSCEV(IVSrc) == IncExpr.
    if (SE.getSCEV(*IVOpIter) == Head.IncExpr
        || SE.getSCEV(IVSrc) == Head.IncExpr) {
      break;
    }
    IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
  }
  if (IVOpIter == IVOpEnd) {
    // Gracefully give up on this chain.
    LLVM_DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n");
    return;
  }

  LLVM_DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n");
  Type *IVTy = IVSrc->getType();
  Type *IntTy = SE.getEffectiveSCEVType(IVTy);
  const SCEV *LeftOverExpr = nullptr;
  for (const IVInc &Inc : Chain) {
    Instruction *InsertPt = Inc.UserInst;
    if (isa<PHINode>(InsertPt))
      InsertPt = L->getLoopLatch()->getTerminator();

    // IVOper will replace the current IV User's operand. IVSrc is the IV
    // value currently held in a register.
    Value *IVOper = IVSrc;
    if (!Inc.IncExpr->isZero()) {
      // IncExpr was the result of subtraction of two narrow values, so it
      // must be signed.
      const SCEV *IncExpr = SE.getNoopOrSignExtend(Inc.IncExpr, IntTy);
      LeftOverExpr = LeftOverExpr ?
        SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr;
    }
    if (LeftOverExpr && !LeftOverExpr->isZero()) {
      // Expand the IV increment.
      Rewriter.clearPostInc();
      Value *IncV = Rewriter.expandCodeFor(LeftOverExpr, IntTy, InsertPt);
      const SCEV *IVOperExpr = SE.getAddExpr(SE.getUnknown(IVSrc),
                                             SE.getUnknown(IncV));
      IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt);

      // If an IV increment can't be folded, use it as the next IV value.
      if (!canFoldIVIncExpr(LeftOverExpr, Inc.UserInst, Inc.IVOperand, TTI)) {
        assert(IVTy == IVOper->getType() && "inconsistent IV increment type");
        IVSrc = IVOper;
        LeftOverExpr = nullptr;
      }
    }
    Type *OperTy = Inc.IVOperand->getType();
    if (IVTy != OperTy) {
      assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) &&
             "cannot extend a chained IV");
      IRBuilder<> Builder(InsertPt);
      IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain");
    }
    Inc.UserInst->replaceUsesOfWith(Inc.IVOperand, IVOper);
    DeadInsts.emplace_back(Inc.IVOperand);
  }
  // If LSR created a new, wider phi, we may also replace its postinc. We only
  // do this if we also found a wide value for the head of the chain.
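  // The phi's incoming value from the latch is redirected to the chain's
  // final IV value (IVSrc), with a pointer cast inserted when the phi is a
  // pointer IV, so the old post-increment computation can die.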
  if (isa<PHINode>(Chain.tailUserInst())) {
    for (PHINode &Phi : L->getHeader()->phis()) {
      if (!isCompatibleIVType(&Phi, IVSrc))
        continue;
      Instruction *PostIncV = dyn_cast<Instruction>(
          Phi.getIncomingValueForBlock(L->getLoopLatch()));
      if (!PostIncV || (SE.getSCEV(PostIncV) != SE.getSCEV(IVSrc)))
        continue;
      Value *IVOper = IVSrc;
      Type *PostIncTy = PostIncV->getType();
      if (IVTy != PostIncTy) {
        assert(PostIncTy->isPointerTy() && "mixing int/ptr IV types");
        IRBuilder<> Builder(L->getLoopLatch()->getTerminator());
        Builder.SetCurrentDebugLocation(PostIncV->getDebugLoc());
        IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain");
      }
      Phi.replaceUsesOfWith(PostIncV, IVOper);
      DeadInsts.emplace_back(PostIncV);
    }
  }
}

void LSRInstance::CollectFixupsAndInitialFormulae() {
  for (const IVStrideUse &U : IU) {
    Instruction *UserInst = U.getUser();
    // Skip IV users that are part of profitable IV Chains.
    User::op_iterator UseI =
        find(UserInst->operands(), U.getOperandValToReplace());
    assert(UseI != UserInst->op_end() && "cannot find IV operand");
    if (IVIncSet.count(UseI)) {
      LLVM_DEBUG(dbgs() << "Use is in profitable chain: " << **UseI << '\n');
      continue;
    }

    LSRUse::KindType Kind = LSRUse::Basic;
    MemAccessTy AccessTy;
    if (isAddressUse(TTI, UserInst, U.getOperandValToReplace())) {
      Kind = LSRUse::Address;
      AccessTy = getAccessType(TTI, UserInst, U.getOperandValToReplace());
    }

    const SCEV *S = IU.getExpr(U);
    PostIncLoopSet TmpPostIncLoops = U.getPostIncLoops();

    // Equality (== and !=) ICmps are special. We can rewrite (i == N) as
    // (N - i == 0), and this allows (N - i) to be the expression that we work
    // with rather than just N or i, so we can consider the register
    // requirements for both N and i at the same time. Limiting this code to
    // equality icmps is not a problem because all interesting loops use
    // equality icmps, thanks to IndVarSimplify.
    if (ICmpInst *CI = dyn_cast<ICmpInst>(UserInst))
      if (CI->isEquality()) {
        // Swap the operands if needed to put the OperandValToReplace on the
        // left, for consistency.
        Value *NV = CI->getOperand(1);
        if (NV == U.getOperandValToReplace()) {
          CI->setOperand(1, CI->getOperand(0));
          CI->setOperand(0, NV);
          NV = CI->getOperand(1);
          Changed = true;
        }

        // x == y  -->  x - y == 0
        const SCEV *N = SE.getSCEV(NV);
        if (SE.isLoopInvariant(N, L) && isSafeToExpand(N, SE)) {
          // S is normalized, so normalize N before folding it into S
          // to keep the result normalized.
          N = normalizeForPostIncUse(N, TmpPostIncLoops, SE);
          Kind = LSRUse::ICmpZero;
          S = SE.getMinusSCEV(N, S);
        }

        // -1 and the negations of all interesting strides (except the
        // negation of -1) are now also interesting.
        for (size_t i = 0, e = Factors.size(); i != e; ++i)
          if (Factors[i] != -1)
            Factors.insert(-(uint64_t)Factors[i]);
        Factors.insert(-1);
      }

    // Get or create an LSRUse.
    std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy);
    size_t LUIdx = P.first;
    int64_t Offset = P.second;
    LSRUse &LU = Uses[LUIdx];

    // Record the fixup.
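    // A fixup records one operand of UserInst (OperandValToReplace) that will
    // be rewritten in terms of whichever formula is eventually chosen for
    // this use, along with the post-inc loops and this use's constant offset.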
    LSRFixup &LF = LU.getNewFixup();
    LF.UserInst = UserInst;
    LF.OperandValToReplace = U.getOperandValToReplace();
    LF.PostIncLoops = TmpPostIncLoops;
    LF.Offset = Offset;
    LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);

    if (!LU.WidestFixupType ||
        SE.getTypeSizeInBits(LU.WidestFixupType) <
        SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
      LU.WidestFixupType = LF.OperandValToReplace->getType();

    // If this is the first use of this LSRUse, give it a formula.
    if (LU.Formulae.empty()) {
      InsertInitialFormula(S, LU, LUIdx);
      CountRegisters(LU.Formulae.back(), LUIdx);
    }
  }

  LLVM_DEBUG(print_fixups(dbgs()));
}

/// Insert a formula for the given expression into the given use, separating
/// out loop-variant portions from loop-invariant and loop-computable portions.
void
LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) {
  // Mark uses whose expressions cannot be expanded.
  if (!isSafeToExpand(S, SE))
    LU.RigidFormula = true;

  Formula F;
  F.initialMatch(S, L, SE);
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Initial formula already exists!"); (void)Inserted;
}

/// Insert a simple single-register formula for the given expression into the
/// given use.
void
LSRInstance::InsertSupplementalFormula(const SCEV *S,
                                       LSRUse &LU, size_t LUIdx) {
  Formula F;
  F.BaseRegs.push_back(S);
  F.HasBaseReg = true;
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Supplemental formula already exists!"); (void)Inserted;
}

/// Note which registers are used by the given formula, updating RegUses.
void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) {
  if (F.ScaledReg)
    RegUses.countRegister(F.ScaledReg, LUIdx);
  for (const SCEV *BaseReg : F.BaseRegs)
    RegUses.countRegister(BaseReg, LUIdx);
}

/// If the given formula has not yet been inserted, add it to the list, and
/// return true. Return false otherwise.
bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
  // Do not insert a formula that we will not be able to expand.
  assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F) &&
         "Formula is illegal");

  if (!LU.InsertFormula(F, *L))
    return false;

  CountRegisters(F, LUIdx);
  return true;
}

/// Check for other uses of loop-invariant values which we're tracking. These
/// other uses will pin these values in registers, making them less profitable
/// for elimination.
/// TODO: This currently misses non-constant addrec step registers.
/// TODO: Should this give more weight to users inside the loop?
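///
/// For example, if %n is defined before the loop and also used after it, the
/// outside use keeps %n live in a register regardless of what LSR does, so a
/// formula that folds %n away inside the loop saves less than it appears to.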
void
LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
  SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end());
  SmallPtrSet<const SCEV *, 32> Visited;

  while (!Worklist.empty()) {
    const SCEV *S = Worklist.pop_back_val();

    // Don't process the same SCEV twice.
    if (!Visited.insert(S).second)
      continue;

    if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
      Worklist.append(N->op_begin(), N->op_end());
    else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
      Worklist.push_back(C->getOperand());
    else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      Worklist.push_back(D->getLHS());
      Worklist.push_back(D->getRHS());
    } else if (const SCEVUnknown *US = dyn_cast<SCEVUnknown>(S)) {
      const Value *V = US->getValue();
      if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
        // Look for instructions defined outside the loop.
        if (L->contains(Inst)) continue;
      } else if (isa<UndefValue>(V))
        // Undef doesn't have a live range, so it doesn't matter.
        continue;
      for (const Use &U : V->uses()) {
        const Instruction *UserInst = dyn_cast<Instruction>(U.getUser());
        // Ignore non-instructions.
        if (!UserInst)
          continue;
        // Ignore instructions in other functions (as can happen with
        // Constants).
        if (UserInst->getParent()->getParent() != L->getHeader()->getParent())
          continue;
        // Ignore instructions not dominated by the loop.
        const BasicBlock *UseBB = !isa<PHINode>(UserInst) ?
          UserInst->getParent() :
          cast<PHINode>(UserInst)->getIncomingBlock(
            PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
        if (!DT.dominates(L->getHeader(), UseBB))
          continue;
        // Don't bother if the instruction is in a BB which ends in an EHPad.
        if (UseBB->getTerminator()->isEHPad())
          continue;
        // Don't bother rewriting PHIs in catchswitch blocks.
        if (isa<CatchSwitchInst>(UserInst->getParent()->getTerminator()))
          continue;
        // Ignore uses which are part of other SCEV expressions, to avoid
        // analyzing them multiple times.
        if (SE.isSCEVable(UserInst->getType())) {
          const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst));
          // If the user is a no-op, look through to its uses.
          if (!isa<SCEVUnknown>(UserS))
            continue;
          if (UserS == US) {
            Worklist.push_back(
                SE.getUnknown(const_cast<Instruction *>(UserInst)));
            continue;
          }
        }
        // Ignore icmp instructions which are already being analyzed.
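        // If the icmp's other operand varies with this loop, the icmp itself
        // is an IV user and is already handled by the main fixup collection.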
        if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
          unsigned OtherIdx = !U.getOperandNo();
          Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
          if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L))
            continue;
        }

        std::pair<size_t, int64_t> P = getUse(
            S, LSRUse::Basic, MemAccessTy());
        size_t LUIdx = P.first;
        int64_t Offset = P.second;
        LSRUse &LU = Uses[LUIdx];
        LSRFixup &LF = LU.getNewFixup();
        LF.UserInst = const_cast<Instruction *>(UserInst);
        LF.OperandValToReplace = U;
        LF.Offset = Offset;
        LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
        if (!LU.WidestFixupType ||
            SE.getTypeSizeInBits(LU.WidestFixupType) <
            SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
          LU.WidestFixupType = LF.OperandValToReplace->getType();
        InsertSupplementalFormula(US, LU, LUIdx);
        CountRegisters(LU.Formulae.back(), Uses.size() - 1);
        break;
      }
    }
  }
}

/// Split S into subexpressions which can be pulled out into separate
/// registers. If C is non-null, multiply each subexpression by C.
///
/// Return the remainder expression after factoring the subexpressions captured
/// by Ops. If Ops is complete, return nullptr.
static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
                                   SmallVectorImpl<const SCEV *> &Ops,
                                   const Loop *L,
                                   ScalarEvolution &SE,
                                   unsigned Depth = 0) {
  // Arbitrarily cap recursion to protect compile time.
  if (Depth >= 3)
    return S;

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    // Break out add operands.
    for (const SCEV *S : Add->operands()) {
      const SCEV *Remainder = CollectSubexprs(S, C, Ops, L, SE, Depth+1);
      if (Remainder)
        Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
    }
    return nullptr;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    // Split a non-zero base out of an addrec.
    if (AR->getStart()->isZero() || !AR->isAffine())
      return S;

    const SCEV *Remainder = CollectSubexprs(AR->getStart(),
                                            C, Ops, L, SE, Depth+1);
    // Split the non-zero AddRec unless it is part of a nested recurrence that
    // does not pertain to this loop.
    if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) {
      Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
      Remainder = nullptr;
    }
    if (Remainder != AR->getStart()) {
      if (!Remainder)
        Remainder = SE.getConstant(AR->getType(), 0);
      return SE.getAddRecExpr(Remainder,
                              AR->getStepRecurrence(SE),
                              AR->getLoop(),
                              // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                              SCEV::FlagAnyWrap);
    }
  } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    // Break (C * (a + b + c)) into C*a + C*b + C*c.
    if (Mul->getNumOperands() != 2)
      return S;
    if (const SCEVConstant *Op0 =
            dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
      C = C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0;
      const SCEV *Remainder =
          CollectSubexprs(Mul->getOperand(1), C, Ops, L, SE, Depth+1);
      if (Remainder)
        Ops.push_back(SE.getMulExpr(C, Remainder));
      return nullptr;
    }
  }
  return S;
}

/// Return true if the SCEV represents a value that may end up as a
/// post-increment operation.
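///
/// Concretely: the use must be an Address use whose access type is an integer
/// (or integer vector) with the same scalar width as the step, S must be an
/// AddRec with a constant step and a loop-invariant but non-constant start,
/// and the target must report an indexed (post-inc) load or store of the
/// AddRec's type as legal.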
static bool mayUsePostIncMode(const TargetTransformInfo &TTI,
                              LSRUse &LU, const SCEV *S, const Loop *L,
                              ScalarEvolution &SE) {
  if (LU.Kind != LSRUse::Address ||
      !LU.AccessTy.getType()->isIntOrIntVectorTy())
    return false;
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S);
  if (!AR)
    return false;
  const SCEV *LoopStep = AR->getStepRecurrence(SE);
  if (!isa<SCEVConstant>(LoopStep))
    return false;
  if (LU.AccessTy.getType()->getScalarSizeInBits() !=
      LoopStep->getType()->getScalarSizeInBits())
    return false;
  // Check if a post-indexed load/store can be used.
  if (TTI.isIndexedLoadLegal(TTI.MIM_PostInc, AR->getType()) ||
      TTI.isIndexedStoreLegal(TTI.MIM_PostInc, AR->getType())) {
    const SCEV *LoopStart = AR->getStart();
    if (!isa<SCEVConstant>(LoopStart) && SE.isLoopInvariant(LoopStart, L))
      return true;
  }
  return false;
}

/// Helper function for LSRInstance::GenerateReassociations.
void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
                                             const Formula &Base,
                                             unsigned Depth, size_t Idx,
                                             bool IsScaledReg) {
  const SCEV *BaseReg = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
  // Don't generate reassociations for the base register of a value that
  // may generate a post-increment operator. The reason is that the
  // reassociations cause extra base+register formulae to be created,
  // and possibly chosen, but the post-increment is more efficient.
  if (TTI.shouldFavorPostInc() && mayUsePostIncMode(TTI, LU, BaseReg, L, SE))
    return;
  SmallVector<const SCEV *, 8> AddOps;
  const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE);
  if (Remainder)
    AddOps.push_back(Remainder);

  if (AddOps.size() == 1)
    return;

  for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
                                                     JE = AddOps.end();
       J != JE; ++J) {
    // Loop-variant "unknown" values are uninteresting; we won't be able to
    // do anything meaningful with them.
    if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L))
      continue;

    // Don't pull a constant into a register if the constant could be folded
    // into an immediate field.
    if (isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
                         LU.AccessTy, *J, Base.getNumRegs() > 1))
      continue;

    // Collect all operands except *J.
    SmallVector<const SCEV *, 8> InnerAddOps(
        ((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
    InnerAddOps.append(std::next(J),
                       ((const SmallVector<const SCEV *, 8> &)AddOps).end());

    // Don't leave just a constant behind in a register if the constant could
    // be folded into an immediate field.
    if (InnerAddOps.size() == 1 &&
        isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
                         LU.AccessTy, InnerAddOps[0], Base.getNumRegs() > 1))
      continue;

    const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
    if (InnerSum->isZero())
      continue;
    Formula F = Base;

    // Add the remaining pieces of the add back into the new formula.
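    // If the inner sum is a small constant that the target can fold, it is
    // moved into the unfolded offset and the register slot is dropped;
    // otherwise the inner sum takes the place of the reassociated register.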
    const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum);
    if (InnerSumSC && SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 &&
        TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
                                InnerSumSC->getValue()->getZExtValue())) {
      F.UnfoldedOffset =
          (uint64_t)F.UnfoldedOffset + InnerSumSC->getValue()->getZExtValue();
      if (IsScaledReg)
        F.ScaledReg = nullptr;
      else
        F.BaseRegs.erase(F.BaseRegs.begin() + Idx);
    } else if (IsScaledReg)
      F.ScaledReg = InnerSum;
    else
      F.BaseRegs[Idx] = InnerSum;

    // Add J as its own register, or an unfolded immediate.
    const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J);
    if (SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
        TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
                                SC->getValue()->getZExtValue()))
      F.UnfoldedOffset =
          (uint64_t)F.UnfoldedOffset + SC->getValue()->getZExtValue();
    else
      F.BaseRegs.push_back(*J);
    // We may have changed the number of registers in base regs; adjust the
    // formula accordingly.
    F.canonicalize(*L);

    if (InsertFormula(LU, LUIdx, F))
      // If that formula hadn't been seen before, recurse to find more like
      // it.
      // Add Log16(AddOps.size()), i.e. Log2_32(AddOps.size()) >> 2, to the
      // depth, because Depth alone is not enough to bound compile time: every
      // time AddOps.size() exceeds 16^x, we add x to the depth.
      GenerateReassociations(LU, LUIdx, LU.Formulae.back(),
                             Depth + 1 + (Log2_32(AddOps.size()) >> 2));
  }
}

/// Split out subexpressions from adds and the bases of addrecs.
void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
                                         Formula Base, unsigned Depth) {
  assert(Base.isCanonical(*L) && "Input must be in the canonical form");
  // Arbitrarily cap recursion to protect compile time.
  if (Depth >= 3)
    return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
    GenerateReassociationsImpl(LU, LUIdx, Base, Depth, i);

  if (Base.Scale == 1)
    GenerateReassociationsImpl(LU, LUIdx, Base, Depth,
                               /* Idx */ -1, /* IsScaledReg */ true);
}

/// Generate a formula consisting of all of the loop-dominating registers added
/// into a single register.
void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
                                       Formula Base) {
  // This method is only interesting on a plurality of registers.
  if (Base.BaseRegs.size() + (Base.Scale == 1) <= 1)
    return;

  // Flatten the representation, i.e., reg1 + 1*reg2 => reg1 + reg2, before
  // processing the formula.
  Base.unscale();
  Formula F = Base;
  F.BaseRegs.clear();
  SmallVector<const SCEV *, 4> Ops;
  for (const SCEV *BaseReg : Base.BaseRegs) {
    if (SE.properlyDominates(BaseReg, L->getHeader()) &&
        !SE.hasComputableLoopEvolution(BaseReg, L))
      Ops.push_back(BaseReg);
    else
      F.BaseRegs.push_back(BaseReg);
  }
  if (Ops.size() > 1) {
    const SCEV *Sum = SE.getAddExpr(Ops);
    // TODO: If Sum is zero, it probably means ScalarEvolution missed an
    // opportunity to fold something. For now, just ignore such cases
    // rather than proceed with zero in a register.
    if (!Sum->isZero()) {
      F.BaseRegs.push_back(Sum);
      F.canonicalize(*L);
      (void)InsertFormula(LU, LUIdx, F);
    }
  }
}

/// Helper function for LSRInstance::GenerateSymbolicOffsets.
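///
/// For example, a base register of (@g + %x) may be split so that @g becomes
/// the formula's symbolic offset (BaseGV) and %x remains the base register,
/// when the target's addressing modes accept a global-value base.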
void LSRInstance::GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
                                              const Formula &Base, size_t Idx,
                                              bool IsScaledReg) {
  const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
  GlobalValue *GV = ExtractSymbol(G, SE);
  if (G->isZero() || !GV)
    return;
  Formula F = Base;
  F.BaseGV = GV;
  if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
    return;
  if (IsScaledReg)
    F.ScaledReg = G;
  else
    F.BaseRegs[Idx] = G;
  (void)InsertFormula(LU, LUIdx, F);
}

/// Generate reuse formulae using symbolic offsets.
void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // We can't add a symbolic offset if the address already contains one.
  if (Base.BaseGV) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
    GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, i);
  if (Base.Scale == 1)
    GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, /* Idx */ -1,
                                /* IsScaledReg */ true);
}

/// Helper function for LSRInstance::GenerateConstantOffsets.
void LSRInstance::GenerateConstantOffsetsImpl(
    LSRUse &LU, unsigned LUIdx, const Formula &Base,
    const SmallVectorImpl<int64_t> &Worklist, size_t Idx, bool IsScaledReg) {
  const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
  for (int64_t Offset : Worklist) {
    Formula F = Base;
    F.BaseOffset = (uint64_t)Base.BaseOffset - Offset;
    if (isLegalUse(TTI, LU.MinOffset - Offset, LU.MaxOffset - Offset, LU.Kind,
                   LU.AccessTy, F)) {
      // Add the offset to the base register.
      const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), Offset), G);
      // If it cancelled out, drop the base register, otherwise update it.
      if (NewG->isZero()) {
        if (IsScaledReg) {
          F.Scale = 0;
          F.ScaledReg = nullptr;
        } else
          F.deleteBaseReg(F.BaseRegs[Idx]);
        F.canonicalize(*L);
      } else if (IsScaledReg)
        F.ScaledReg = NewG;
      else
        F.BaseRegs[Idx] = NewG;

      (void)InsertFormula(LU, LUIdx, F);
    }
  }

  int64_t Imm = ExtractImmediate(G, SE);
  if (G->isZero() || Imm == 0)
    return;
  Formula F = Base;
  F.BaseOffset = (uint64_t)F.BaseOffset + Imm;
  if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
    return;
  if (IsScaledReg)
    F.ScaledReg = G;
  else
    F.BaseRegs[Idx] = G;
  (void)InsertFormula(LU, LUIdx, F);
}

/// Generate reuse formulae using constant offsets.
void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // TODO: For now, just add the min and max offset, because it usually isn't
  // worthwhile looking at everything in between.
  SmallVector<int64_t, 2> Worklist;
  Worklist.push_back(LU.MinOffset);
  if (LU.MaxOffset != LU.MinOffset)
    Worklist.push_back(LU.MaxOffset);

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
    GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, i);
  if (Base.Scale == 1)
    GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, /* Idx */ -1,
                                /* IsScaledReg */ true);
}

/// For ICmpZero, check to see if we can scale up the comparison. For example,
/// x == y -> x*c == y*c.
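///
/// Scaling up lets the comparison share a register, e.g. one already holding
/// 4*i for an address computation, instead of keeping a separate register
/// live just for i.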
void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
                                         Formula Base) {
  if (LU.Kind != LSRUse::ICmpZero) return;

  // Determine the integer type for the base formula.
  Type *IntTy = Base.getType();
  if (!IntTy) return;
  if (SE.getTypeSizeInBits(IntTy) > 64) return;

  // Don't do this if there is more than one offset.
  if (LU.MinOffset != LU.MaxOffset) return;

  // Check if transformation is valid. It is illegal to multiply a pointer.
  if (Base.ScaledReg && Base.ScaledReg->getType()->isPointerTy())
    return;
  for (const SCEV *BaseReg : Base.BaseRegs)
    if (BaseReg->getType()->isPointerTy())
      return;
  assert(!Base.BaseGV && "ICmpZero use is not legal!");

  // Check each interesting stride.
  for (int64_t Factor : Factors) {
    // Check that the multiplication doesn't overflow.
    if (Base.BaseOffset == std::numeric_limits<int64_t>::min() && Factor == -1)
      continue;
    int64_t NewBaseOffset = (uint64_t)Base.BaseOffset * Factor;
    if (NewBaseOffset / Factor != Base.BaseOffset)
      continue;
    // If the offset will be truncated at this use, check that it is in bounds.
    if (!IntTy->isPointerTy() &&
        !ConstantInt::isValueValidForType(IntTy, NewBaseOffset))
      continue;

    // Check that multiplying with the use offset doesn't overflow.
    int64_t Offset = LU.MinOffset;
    if (Offset == std::numeric_limits<int64_t>::min() && Factor == -1)
      continue;
    Offset = (uint64_t)Offset * Factor;
    if (Offset / Factor != LU.MinOffset)
      continue;
    // If the offset will be truncated at this use, check that it is in bounds.
    if (!IntTy->isPointerTy() &&
        !ConstantInt::isValueValidForType(IntTy, Offset))
      continue;

    Formula F = Base;
    F.BaseOffset = NewBaseOffset;

    // Check that this scale is legal.
    if (!isLegalUse(TTI, Offset, Offset, LU.Kind, LU.AccessTy, F))
      continue;

    // Compensate for the use having MinOffset built into it.
    F.BaseOffset = (uint64_t)F.BaseOffset + Offset - LU.MinOffset;

    const SCEV *FactorS = SE.getConstant(IntTy, Factor);

    // Check that multiplying with each base register doesn't overflow.
    for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
      F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
      if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
        goto next;
    }

    // Check that multiplying with the scaled register doesn't overflow.
    if (F.ScaledReg) {
      F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
      if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
        continue;
    }

    // Check that multiplying with the unfolded offset doesn't overflow.
    if (F.UnfoldedOffset != 0) {
      if (F.UnfoldedOffset == std::numeric_limits<int64_t>::min() &&
          Factor == -1)
        continue;
      F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor;
      if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset)
        continue;
      // If the offset will be truncated, check that it is in bounds.
      if (!IntTy->isPointerTy() &&
          !ConstantInt::isValueValidForType(IntTy, F.UnfoldedOffset))
        continue;
    }

    // If we make it here and it's legal, add it.
    (void)InsertFormula(LU, LUIdx, F);
  next:;
  }
}

/// Generate stride factor reuse formulae by making use of scaled-offset
/// address modes, for example.
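///
/// For instance, a use of reg({0,+,4}) may be rewritten as 4*reg({0,+,1}) on
/// a target whose addressing modes include base + 4*index, letting the
/// narrower IV be shared with other uses.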
void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
  // Determine the integer type for the base formula.
  Type *IntTy = Base.getType();
  if (!IntTy) return;

  // If this Formula already has a scaled register, we can't add another one.
  // Try to unscale the formula to generate a better scale.
  if (Base.Scale != 0 && !Base.unscale())
    return;

  assert(Base.Scale == 0 && "unscale did not do its job!");

  // Check each interesting stride.
  for (int64_t Factor : Factors) {
    Base.Scale = Factor;
    Base.HasBaseReg = Base.BaseRegs.size() > 1;
    // Check whether this scale is going to be legal.
    if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
                    Base)) {
      // As a special case, handle out-of-loop Basic users by downgrading
      // them to the Special use kind.
      // TODO: Reconsider this special case.
      if (LU.Kind == LSRUse::Basic &&
          isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LSRUse::Special,
                     LU.AccessTy, Base) &&
          LU.AllFixupsOutsideLoop)
        LU.Kind = LSRUse::Special;
      else
        continue;
    }
    // For an ICmpZero, negating a solitary base register won't lead to
    // new solutions.
    if (LU.Kind == LSRUse::ICmpZero &&
        !Base.HasBaseReg && Base.BaseOffset == 0 && !Base.BaseGV)
      continue;
    // For each addrec base reg, if its loop is the current loop, apply the
    // scale.
    for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
      const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i]);
      if (AR && (AR->getLoop() == L || LU.AllFixupsOutsideLoop)) {
        const SCEV *FactorS = SE.getConstant(IntTy, Factor);
        if (FactorS->isZero())
          continue;
        // Divide out the factor, ignoring high bits, since we'll be
        // scaling the value back up in the end.
        if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) {
          // TODO: This could be optimized to avoid all the copying.
          Formula F = Base;
          F.ScaledReg = Quotient;
          F.deleteBaseReg(F.BaseRegs[i]);
          // The canonical representation of 1*reg is reg, which is already in
          // Base. In that case, do not try to insert the formula, it will be
          // rejected anyway.
          if (F.Scale == 1 && (F.BaseRegs.empty() ||
                               (AR->getLoop() != L &&
                                LU.AllFixupsOutsideLoop)))
            continue;
          // If AllFixupsOutsideLoop is true and F.Scale is 1, we may generate
          // a non-canonical Formula with ScaledReg's loop not being L.
          if (F.Scale == 1 && LU.AllFixupsOutsideLoop)
            F.canonicalize(*L);
          (void)InsertFormula(LU, LUIdx, F);
        }
      }
    }
  }
}

/// Generate reuse formulae from different IV types.
void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
  // Don't bother truncating symbolic values.
  if (Base.BaseGV) return;

  // Determine the integer type for the base formula.
  Type *DstTy = Base.getType();
  if (!DstTy) return;
  DstTy = SE.getEffectiveSCEVType(DstTy);

  for (Type *SrcTy : Types) {
    if (SrcTy != DstTy && TTI.isTruncateFree(SrcTy, DstTy)) {
      Formula F = Base;

      if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, SrcTy);
      for (const SCEV *&BaseReg : F.BaseRegs)
        BaseReg = SE.getAnyExtendExpr(BaseReg, SrcTy);

      // TODO: This assumes we've done basic processing on all uses and
      // have an idea what the register usage is.
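      // Only keep the widened formula if it can share registers that other
      // uses already require; a truncate that reuses nothing is pure cost.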
      if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses))
        continue;

      F.canonicalize(*L);
      (void)InsertFormula(LU, LUIdx, F);
    }
  }
}

namespace {

/// Helper class for GenerateCrossUseConstantOffsets. It's used to defer
/// modifications so that the search phase doesn't have to worry about the data
/// structures moving underneath it.
struct WorkItem {
  size_t LUIdx;
  int64_t Imm;
  const SCEV *OrigReg;

  WorkItem(size_t LI, int64_t I, const SCEV *R)
      : LUIdx(LI), Imm(I), OrigReg(R) {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void WorkItem::print(raw_ostream &OS) const {
  OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
     << ", add offset " << Imm;
}

LLVM_DUMP_METHOD void WorkItem::dump() const {
  print(errs()); errs() << '\n';
}
#endif

/// Look for registers which are a constant distance apart and try to form
/// reuse opportunities between them.
void LSRInstance::GenerateCrossUseConstantOffsets() {
  // Group the registers by their value without any added constant offset.
  using ImmMapTy = std::map<int64_t, const SCEV *>;

  DenseMap<const SCEV *, ImmMapTy> Map;
  DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
  SmallVector<const SCEV *, 8> Sequence;
  for (const SCEV *Use : RegUses) {
    const SCEV *Reg = Use; // Make a copy for ExtractImmediate to modify.
    int64_t Imm = ExtractImmediate(Reg, SE);
    auto Pair = Map.insert(std::make_pair(Reg, ImmMapTy()));
    if (Pair.second)
      Sequence.push_back(Reg);
    Pair.first->second.insert(std::make_pair(Imm, Use));
    UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(Use);
  }

  // Now examine each set of registers with the same base value. Build up
  // a list of work to do and do the work in a separate step so that we're
  // not adding formulae and register counts while we're searching.
  SmallVector<WorkItem, 32> WorkItems;
  SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
  for (const SCEV *Reg : Sequence) {
    const ImmMapTy &Imms = Map.find(Reg)->second;

    // It's not worthwhile looking for reuse if there's only one offset.
    if (Imms.size() == 1)
      continue;

    LLVM_DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
               for (const auto &Entry : Imms)
                 dbgs() << ' ' << Entry.first;
               dbgs() << '\n');

    // Examine each offset.
    for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
         J != JE; ++J) {
      const SCEV *OrigReg = J->second;

      int64_t JImm = J->first;
      const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);

      if (!isa<SCEVConstant>(OrigReg) &&
          UsedByIndicesMap[Reg].count() == 1) {
        LLVM_DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg
                          << '\n');
        continue;
      }

      // Conservatively examine offsets between this orig reg and a few
      // selected other orig regs.
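      // The selected offsets are the smallest, the largest, and one near the
      // middle of the range; checking every pair of offsets would be
      // quadratic in the number of offsets per base register.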
      ImmMapTy::const_iterator OtherImms[] = {
          Imms.begin(), std::prev(Imms.end()),
          Imms.lower_bound((Imms.begin()->first +
                            std::prev(Imms.end())->first) / 2)
      };
      for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
        ImmMapTy::const_iterator M = OtherImms[i];
        if (M == J || M == JE) continue;

        // Compute the difference between the two.
        int64_t Imm = (uint64_t)JImm - M->first;
        for (unsigned LUIdx : UsedByIndices.set_bits())
          // Make a memo of this use, offset, and register tuple.
          if (UniqueItems.insert(std::make_pair(LUIdx, Imm)).second)
            WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
      }
    }
  }

  Map.clear();
  Sequence.clear();
  UsedByIndicesMap.clear();
  UniqueItems.clear();

  // Now iterate through the worklist and add new formulae.
  for (const WorkItem &WI : WorkItems) {
    size_t LUIdx = WI.LUIdx;
    LSRUse &LU = Uses[LUIdx];
    int64_t Imm = WI.Imm;
    const SCEV *OrigReg = WI.OrigReg;

    Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
    const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
    unsigned BitWidth = SE.getTypeSizeInBits(IntTy);

    // TODO: Use a more targeted data structure.
    for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
      Formula F = LU.Formulae[L];
      // FIXME: The code for the scaled and unscaled registers looks
      // very similar but slightly different. Investigate if they
      // could be merged. That way, we would not have to unscale the
      // Formula.
      F.unscale();
      // Use the immediate in the scaled register.
      if (F.ScaledReg == OrigReg) {
        int64_t Offset = (uint64_t)F.BaseOffset + Imm * (uint64_t)F.Scale;
        // Don't create 50 + reg(-50).
        if (F.referencesReg(SE.getSCEV(
                ConstantInt::get(IntTy, -(uint64_t)Offset))))
          continue;
        Formula NewF = F;
        NewF.BaseOffset = Offset;
        if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
                        NewF))
          continue;
        NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);

        // If the new scale is a constant in a register, and adding the
        // constant value to the immediate would produce a value closer to
        // zero than the immediate itself, then the formula isn't worthwhile.
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg))
          if (C->getValue()->isNegative() != (NewF.BaseOffset < 0) &&
              (C->getAPInt().abs() * APInt(BitWidth, F.Scale))
                  .ule(std::abs(NewF.BaseOffset)))
            continue;

        // OK, looks good.
        NewF.canonicalize(*this->L);
        (void)InsertFormula(LU, LUIdx, NewF);
      } else {
        // Use the immediate in a base register.
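        // Try folding Imm into the base offset of each formula that uses
        // OrigReg; if the resulting offset is not legal for this use, fall
        // back to accumulating Imm in the unfolded offset instead.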
        for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) {
          const SCEV *BaseReg = F.BaseRegs[N];
          if (BaseReg != OrigReg)
            continue;
          Formula NewF = F;
          NewF.BaseOffset = (uint64_t)NewF.BaseOffset + Imm;
          if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset,
                          LU.Kind, LU.AccessTy, NewF)) {
            if (TTI.shouldFavorPostInc() &&
                mayUsePostIncMode(TTI, LU, OrigReg, this->L, SE))
              continue;
            if (!TTI.isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
              continue;
            NewF = F;
            NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm;
          }
          NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg);

          // If the new formula has a constant in a register, and adding the
          // constant value to the immediate would produce a value closer to
          // zero than the immediate itself, then the formula isn't worthwhile.
          for (const SCEV *NewReg : NewF.BaseRegs)
            if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewReg))
              if ((C->getAPInt() + NewF.BaseOffset)
                      .abs()
                      .slt(std::abs(NewF.BaseOffset)) &&
                  (C->getAPInt() + NewF.BaseOffset).countTrailingZeros() >=
                      countTrailingZeros<uint64_t>(NewF.BaseOffset))
                goto skip_formula;

          // Ok, looks good.
          NewF.canonicalize(*this->L);
          (void)InsertFormula(LU, LUIdx, NewF);
          break;
        skip_formula:;
        }
      }
    }
  }
}

/// Generate formulae for each use.
void
LSRInstance::GenerateAllReuseFormulae() {
  // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
  // queries are more precise.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateScales(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
  }

  GenerateCrossUseConstantOffsets();

  LLVM_DEBUG(dbgs() << "\n"
                       "After generating reuse formulae:\n";
             print_uses(dbgs()));
}

/// If there are multiple formulae with the same set of registers used
/// by other uses, pick the best one and delete the others.
void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
  DenseSet<const SCEV *> VisitedRegs;
  SmallPtrSet<const SCEV *, 16> Regs;
  SmallPtrSet<const SCEV *, 16> LoserRegs;
#ifndef NDEBUG
  bool ChangedFormulae = false;
#endif

  // Collect the best formula for each unique set of shared registers. This
  // is reset for each use.
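  // The key of this map is the set of a formula's registers that other uses
  // also reference; two formulae with the same key look identical to every
  // other use, so only the cheaper of the two needs to survive.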
  using BestFormulaeTy =
      DenseMap<SmallVector<const SCEV *, 4>, size_t, UniquifierDenseMapInfo>;

  BestFormulaeTy BestFormulae;

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs());
               dbgs() << '\n');

    bool Any = false;
    for (size_t FIdx = 0, NumForms = LU.Formulae.size();
         FIdx != NumForms; ++FIdx) {
      Formula &F = LU.Formulae[FIdx];

      // Some formulae are instant losers. For example, they may depend on
      // nonexistent AddRecs from other loops. These need to be filtered
      // immediately, otherwise heuristics could choose them over others
      // leading to an unsatisfactory solution. Passing LoserRegs into
      // RateFormula here avoids the need to recompute this information across
      // formulae using the same bad AddRec. Passing LoserRegs is also
      // essential unless we remove the corresponding bad register from the
      // Regs set.
      Cost CostF;
      Regs.clear();
      CostF.RateFormula(TTI, F, Regs, VisitedRegs, L, SE, DT, LU, &LoserRegs);
      if (CostF.isLoser()) {
        // During initial formula generation, undesirable formulae are
        // generated by uses within other loops that have some non-trivial
        // address mode or use the postinc form of the IV. LSR needs to provide
        // these formulae as the basis of rediscovering the desired formula
        // that uses an AddRec corresponding to the existing phi. Once all
        // formulae have been generated, these initial losers may be pruned.
        LLVM_DEBUG(dbgs() << "  Filtering loser "; F.print(dbgs());
                   dbgs() << "\n");
      } else {
        SmallVector<const SCEV *, 4> Key;
        for (const SCEV *Reg : F.BaseRegs) {
          if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
            Key.push_back(Reg);
        }
        if (F.ScaledReg &&
            RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
          Key.push_back(F.ScaledReg);
        // Unstable sort by host order ok, because this is only used for
        // uniquifying.
        llvm::sort(Key.begin(), Key.end());

        std::pair<BestFormulaeTy::const_iterator, bool> P =
            BestFormulae.insert(std::make_pair(Key, FIdx));
        if (P.second)
          continue;

        Formula &Best = LU.Formulae[P.first->second];

        Cost CostBest;
        Regs.clear();
        CostBest.RateFormula(TTI, Best, Regs, VisitedRegs, L, SE, DT, LU);
        if (CostF.isLess(CostBest, TTI))
          std::swap(F, Best);
        LLVM_DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
                   dbgs() << "\n"
                             "    in favor of formula ";
                   Best.print(dbgs()); dbgs() << '\n');
      }
#ifndef NDEBUG
      ChangedFormulae = true;
#endif
      LU.DeleteFormula(F);
      --FIdx;
      --NumForms;
      Any = true;
    }

    // Now that we've filtered out some formulae, recompute the Regs set.
    if (Any)
      LU.RecomputeRegs(LUIdx, RegUses);

    // Reset this to prepare for the next use.
    BestFormulae.clear();
  }

  LLVM_DEBUG(if (ChangedFormulae) {
    dbgs() << "\n"
              "After filtering out undesirable candidates:\n";
    print_uses(dbgs());
  });
}

// This is a rough guess that seems to work fairly well.
static const size_t ComplexityLimit = std::numeric_limits<uint16_t>::max();

/// Estimate the worst-case number of solutions the solver might have to
/// consider.
/// It almost never considers this many solutions because it prunes the
/// search space, but the pruning isn't always sufficient.
size_t LSRInstance::EstimateSearchSpaceComplexity() const {
  size_t Power = 1;
  for (const LSRUse &LU : Uses) {
    size_t FSize = LU.Formulae.size();
    if (FSize >= ComplexityLimit) {
      Power = ComplexityLimit;
      break;
    }
    Power *= FSize;
    if (Power >= ComplexityLimit)
      break;
  }
  return Power;
}

/// When one formula uses a superset of the registers of another formula, it
/// won't help reduce register pressure (though it may not necessarily hurt
/// register pressure); remove it to simplify the system.
void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    LLVM_DEBUG(dbgs() << "The search space is too complex.\n");

    LLVM_DEBUG(dbgs() << "Narrowing the search space by eliminating formulae "
                         "which use a superset of registers used by other "
                         "formulae.\n");

    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      bool Any = false;
      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        // Look for a formula with a constant or GV in a register. If the use
        // also has a formula with that same value in an immediate field,
        // delete the one that uses a register.
        for (SmallVectorImpl<const SCEV *>::const_iterator
             I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) {
          if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) {
            Formula NewF = F;
            NewF.BaseOffset += C->getValue()->getSExtValue();
            NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
                                (I - F.BaseRegs.begin()));
            if (LU.HasFormulaWithSameRegs(NewF)) {
              LLVM_DEBUG(dbgs() << "  Deleting "; F.print(dbgs());
                         dbgs() << '\n');
              LU.DeleteFormula(F);
              --i;
              --e;
              Any = true;
              break;
            }
          } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) {
            if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue()))
              if (!F.BaseGV) {
                Formula NewF = F;
                NewF.BaseGV = GV;
                NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
                                    (I - F.BaseRegs.begin()));
                if (LU.HasFormulaWithSameRegs(NewF)) {
                  LLVM_DEBUG(dbgs() << "  Deleting "; F.print(dbgs());
                             dbgs() << '\n');
                  LU.DeleteFormula(F);
                  --i;
                  --e;
                  Any = true;
                  break;
                }
              }
          }
        }
      }
      if (Any)
        LU.RecomputeRegs(LUIdx, RegUses);
    }

    LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
  }
}

/// When there are many registers for expressions like A, A+1, A+2, etc.,
/// allocate a single register for them.
void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
  if (EstimateSearchSpaceComplexity() < ComplexityLimit)
    return;

  LLVM_DEBUG(
      dbgs() << "The search space is too complex.\n"
                "Narrowing the search space by assuming that uses separated "
                "by a constant offset will use the same registers.\n");

  // This is especially useful for unrolled loops.
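  // For example, an unrolled body may reference A, A+4, A+8, and A+12;
  // merging those uses lets a single register hold A while the constant
  // offsets fold into the individual memory operations.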

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (const Formula &F : LU.Formulae) {
      if (F.BaseOffset == 0 || (F.Scale != 0 && F.Scale != 1))
        continue;

      LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU);
      if (!LUThatHas)
        continue;

      if (!reconcileNewOffset(*LUThatHas, F.BaseOffset, /*HasBaseReg=*/ false,
                              LU.Kind, LU.AccessTy))
        continue;

      LLVM_DEBUG(dbgs() << "  Deleting use "; LU.print(dbgs());
                 dbgs() << '\n');

      LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;

      // Transfer the fixups of LU to LUThatHas.
      for (LSRFixup &Fixup : LU.Fixups) {
        Fixup.Offset += F.BaseOffset;
        LUThatHas->pushFixup(Fixup);
        LLVM_DEBUG(dbgs() << "New fixup has offset " << Fixup.Offset << '\n');
      }

      // Delete formulae from the new use which are no longer legal.
      bool Any = false;
      for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) {
        Formula &F = LUThatHas->Formulae[i];
        if (!isLegalUse(TTI, LUThatHas->MinOffset, LUThatHas->MaxOffset,
                        LUThatHas->Kind, LUThatHas->AccessTy, F)) {
          LLVM_DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
          LUThatHas->DeleteFormula(F);
          --i;
          --e;
          Any = true;
        }
      }

      if (Any)
        LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses);

      // Delete the old use.
      DeleteUse(LU, LUIdx);
      --LUIdx;
      --NumUses;
      break;
    }
  }

  LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
}

/// Call FilterOutUndesirableDedicatedRegisters again, if necessary, now that
/// we've done more filtering, as it may be able to find more formulae to
/// eliminate.
void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    LLVM_DEBUG(dbgs() << "The search space is too complex.\n");

    LLVM_DEBUG(dbgs() << "Narrowing the search space by re-filtering out "
                         "undesirable dedicated registers.\n");

    FilterOutUndesirableDedicatedRegisters();

    LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
  }
}

/// If an LSRUse has multiple formulae with the same ScaledReg and Scale, pick
/// the best one and delete the others.
/// This narrowing heuristic keeps as many formulae with different Scale and
/// ScaledReg pairs as possible while narrowing the search space. The benefit
/// is that a better solution is more likely to be found in a formulae set
/// with more Scale and ScaledReg variations than in one where they are all
/// the same. The winner-reg-picking heuristic will often keep the formulae
/// with the same Scale and ScaledReg and filter the others, and we want to
/// avoid that if possible.
void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() {
  if (EstimateSearchSpaceComplexity() < ComplexityLimit)
    return;

  LLVM_DEBUG(
      dbgs() << "The search space is too complex.\n"
                "Narrowing the search space by choosing the best Formula "
                "from the Formulae with the same Scale and ScaledReg.\n");

  // Map the "Scale * ScaledReg" pair to the best formula of the current
  // LSRUse.
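  // Formulae without a ScaledReg never compete here; only formulae sharing
  // the same (ScaledReg, Scale) pair are filtered against each other.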
  using BestFormulaeTy = DenseMap<std::pair<const SCEV *, int64_t>, size_t>;

  BestFormulaeTy BestFormulae;
#ifndef NDEBUG
  bool ChangedFormulae = false;
#endif
  DenseSet<const SCEV *> VisitedRegs;
  SmallPtrSet<const SCEV *, 16> Regs;

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs());
               dbgs() << '\n');

    // Return true if Formula FA is better than Formula FB.
    auto IsBetterThan = [&](Formula &FA, Formula &FB) {
      // First we will try to choose the Formula with fewer new registers.
      // For a register used by the current Formula, the more the register is
      // shared among LSRUses, the less we increase the register number
      // counter of the formula.
      size_t FARegNum = 0;
      for (const SCEV *Reg : FA.BaseRegs) {
        const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg);
        FARegNum += (NumUses - UsedByIndices.count() + 1);
      }
      size_t FBRegNum = 0;
      for (const SCEV *Reg : FB.BaseRegs) {
        const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg);
        FBRegNum += (NumUses - UsedByIndices.count() + 1);
      }
      if (FARegNum != FBRegNum)
        return FARegNum < FBRegNum;

      // If the new register numbers are the same, choose the Formula with
      // the lower cost.
      Cost CostFA, CostFB;
      Regs.clear();
      CostFA.RateFormula(TTI, FA, Regs, VisitedRegs, L, SE, DT, LU);
      Regs.clear();
      CostFB.RateFormula(TTI, FB, Regs, VisitedRegs, L, SE, DT, LU);
      return CostFA.isLess(CostFB, TTI);
    };

    bool Any = false;
    for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms;
         ++FIdx) {
      Formula &F = LU.Formulae[FIdx];
      if (!F.ScaledReg)
        continue;
      auto P = BestFormulae.insert({{F.ScaledReg, F.Scale}, FIdx});
      if (P.second)
        continue;

      Formula &Best = LU.Formulae[P.first->second];
      if (IsBetterThan(F, Best))
        std::swap(F, Best);
      LLVM_DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
                 dbgs() << "\n"
                           "    in favor of formula ";
                 Best.print(dbgs()); dbgs() << '\n');
#ifndef NDEBUG
      ChangedFormulae = true;
#endif
      LU.DeleteFormula(F);
      --FIdx;
      --NumForms;
      Any = true;
    }
    if (Any)
      LU.RecomputeRegs(LUIdx, RegUses);

    // Reset this to prepare for the next use.
    BestFormulae.clear();
  }

  LLVM_DEBUG(if (ChangedFormulae) {
    dbgs() << "\n"
              "After filtering out undesirable candidates:\n";
    print_uses(dbgs());
  });
}

/// This function deletes formulae with a high expected register count.
/// Assuming we don't know the value of each formula (we have already deleted
/// all the clearly inefficient ones), compute for each register the
/// probability of it not being selected.
/// For example,
/// Use1:
///  reg(a) + reg({0,+,1})
///  reg(a) + reg({-1,+,1}) + 1
///  reg({a,+,1})
/// Use2:
///  reg(b) + reg({0,+,1})
///  reg(b) + reg({-1,+,1}) + 1
///  reg({b,+,1})
/// Use3:
///  reg(c) + reg(b) + reg({0,+,1})
///  reg(c) + reg({b,+,1})
///
/// Probability of not selecting
///                 Use1    Use2    Use3
/// reg(a)         (1/3) *   1   *   1
/// reg(b)           1   * (1/3) * (1/2)
/// reg({0,+,1})   (2/3) * (2/3) * (1/2)
/// reg({-1,+,1})  (2/3) * (2/3) *   1
/// reg({a,+,1})   (2/3) *   1   *   1
/// reg({b,+,1})     1   * (2/3) * (2/3)
/// reg(c)           1   *   1   *   0
///
/// Now compute the expected register count for each formula. Note that for
/// each use we exclude the probability of not selecting for that use. For
/// example, for Use1 the probability for reg(a) would be just 1 * 1
/// (excluding the probability 1/3 of not selecting for Use1).
/// Use1:
///  reg(a) + reg({0,+,1})          1 + 1/3       -- to be deleted
///  reg(a) + reg({-1,+,1}) + 1     1 + 4/9       -- to be deleted
///  reg({a,+,1})                   1
/// Use2:
///  reg(b) + reg({0,+,1})          1/2 + 1/3     -- to be deleted
///  reg(b) + reg({-1,+,1}) + 1     1/2 + 2/3     -- to be deleted
///  reg({b,+,1})                   2/3
/// Use3:
///  reg(c) + reg(b) + reg({0,+,1}) 1 + 1/3 + 4/9 -- to be deleted
///  reg(c) + reg({b,+,1})          1 + 2/3
void LSRInstance::NarrowSearchSpaceByDeletingCostlyFormulas() {
  if (EstimateSearchSpaceComplexity() < ComplexityLimit)
    return;
  // Ok, we have too many formulae on our hands to conveniently handle.
  // Use a rough heuristic to thin out the list.

  // Set of Regs which will be 100% used in the final solution, i.e. used in
  // each formula of a solution (in the example above this is reg(c)).
  // We can skip them in the calculations.
  SmallPtrSet<const SCEV *, 4> UniqRegs;
  LLVM_DEBUG(dbgs() << "The search space is too complex.\n");

  // Map each register to the probability of it not being selected.
  DenseMap<const SCEV *, float> RegNumMap;
  for (const SCEV *Reg : RegUses) {
    if (UniqRegs.count(Reg))
      continue;
    float PNotSel = 1;
    for (const LSRUse &LU : Uses) {
      if (!LU.Regs.count(Reg))
        continue;
      float P = LU.getNotSelectedProbability(Reg);
      if (P != 0.0)
        PNotSel *= P;
      else
        UniqRegs.insert(Reg);
    }
    RegNumMap.insert(std::make_pair(Reg, PNotSel));
  }

  LLVM_DEBUG(
      dbgs() << "Narrowing the search space by deleting costly formulas\n");

  // Delete formulae whose expected register count is high.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    // If there is nothing to delete, continue.
    if (LU.Formulae.size() < 2)
      continue;
    // This is a temporary solution to test performance. Float should be
    // replaced with a rounding-independent type (based on integers) to avoid
    // different results for different target builds.
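    // The expected register count of a formula is the sum, over its
    // registers, of the probability that the register is still needed by the
    // other uses; dividing RegNumMap[Reg] by this use's own not-selected
    // probability factors this use back out of the product.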
    float FMinRegNum = LU.Formulae[0].getNumRegs();
    float FMinARegNum = LU.Formulae[0].getNumRegs();
    size_t MinIdx = 0;
    for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
      Formula &F = LU.Formulae[i];
      float FRegNum = 0;
      float FARegNum = 0;
      for (const SCEV *BaseReg : F.BaseRegs) {
        if (UniqRegs.count(BaseReg))
          continue;
        FRegNum += RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg);
        if (isa<SCEVAddRecExpr>(BaseReg))
          FARegNum +=
              RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg);
      }
      if (const SCEV *ScaledReg = F.ScaledReg) {
        if (!UniqRegs.count(ScaledReg)) {
          FRegNum +=
              RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg);
          if (isa<SCEVAddRecExpr>(ScaledReg))
            FARegNum +=
                RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg);
        }
      }
      if (FMinRegNum > FRegNum ||
          (FMinRegNum == FRegNum && FMinARegNum > FARegNum)) {
        FMinRegNum = FRegNum;
        FMinARegNum = FARegNum;
        MinIdx = i;
      }
    }
    LLVM_DEBUG(dbgs() << "  The formula "; LU.Formulae[MinIdx].print(dbgs());
               dbgs() << " with min reg num " << FMinRegNum << '\n');
    if (MinIdx != 0)
      std::swap(LU.Formulae[MinIdx], LU.Formulae[0]);
    while (LU.Formulae.size() != 1) {
      LLVM_DEBUG(dbgs() << "  Deleting "; LU.Formulae.back().print(dbgs());
                 dbgs() << '\n');
      LU.Formulae.pop_back();
    }
    LU.RecomputeRegs(LUIdx, RegUses);
    assert(LU.Formulae.size() == 1 && "Should be exactly 1 min regs formula");
    Formula &F = LU.Formulae[0];
    LLVM_DEBUG(dbgs() << "  Leaving only "; F.print(dbgs()); dbgs() << '\n');
    // When we choose the formula, the regs become unique.
    UniqRegs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
    if (F.ScaledReg)
      UniqRegs.insert(F.ScaledReg);
  }
  LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
}

/// Pick a register which seems likely to be profitable, and then in any use
/// which has any reference to that register, delete all formulae which do not
/// reference that register.
void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
  // With all other options exhausted, loop until the system is simple
  // enough to handle.
  SmallPtrSet<const SCEV *, 4> Taken;
  while (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    // Ok, we have too many formulae on our hands to conveniently handle.
    // Use a rough heuristic to thin out the list.
    LLVM_DEBUG(dbgs() << "The search space is too complex.\n");

    // Pick the register which is used by the most LSRUses, which is likely
    // to be a good reuse register candidate.
    const SCEV *Best = nullptr;
    unsigned BestNum = 0;
    for (const SCEV *Reg : RegUses) {
      if (Taken.count(Reg))
        continue;
      if (!Best) {
        Best = Reg;
        BestNum = RegUses.getUsedByIndices(Reg).count();
      } else {
        unsigned Count = RegUses.getUsedByIndices(Reg).count();
        if (Count > BestNum) {
          Best = Reg;
          BestNum = Count;
        }
      }
    }

    LLVM_DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
                      << " will yield profitable reuse.\n");
    Taken.insert(Best);

    // In any use with formulae which reference this register, delete formulae
    // which don't reference it.
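    // For example, if reg({0,+,1}) wins, a use offering both
    // reg(a) + reg({0,+,1}) and reg({a,+,1}) keeps only the former.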
    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      if (!LU.Regs.count(Best)) continue;

      bool Any = false;
      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        if (!F.referencesReg(Best)) {
          LLVM_DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
          LU.DeleteFormula(F);
          --e;
          --i;
          Any = true;
          assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?");
          continue;
        }
      }

      if (Any)
        LU.RecomputeRegs(LUIdx, RegUses);
    }

    LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
  }
}

/// If there is an extraordinary number of formulae to choose from, use some
/// rough heuristics to prune down the number of formulae. This keeps the main
/// solver from taking an extraordinary amount of time in some worst-case
/// scenarios.
void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
  NarrowSearchSpaceByDetectingSupersets();
  NarrowSearchSpaceByCollapsingUnrolledCode();
  NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  if (FilterSameScaledReg)
    NarrowSearchSpaceByFilterFormulaWithSameScaledReg();
  if (LSRExpNarrow)
    NarrowSearchSpaceByDeletingCostlyFormulas();
  else
    NarrowSearchSpaceByPickingWinnerRegs();
}

/// This is the recursive solver.
void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                               Cost &SolutionCost,
                               SmallVectorImpl<const Formula *> &Workspace,
                               const Cost &CurCost,
                               const SmallPtrSet<const SCEV *, 16> &CurRegs,
                               DenseSet<const SCEV *> &VisitedRegs) const {
  // Some ideas:
  //  - prune more:
  //    - use more aggressive filtering
  //    - sort the formulae so that the most profitable solutions are found
  //      first
  //    - sort the uses too
  //  - search faster:
  //    - don't compute a cost and then compare; compare while computing the
  //      cost and bail early
  //    - track register sets with SmallBitVector

  const LSRUse &LU = Uses[Workspace.size()];

  // If this use references any register that's already a part of the
  // in-progress solution, consider it a requirement that a formula must
  // reference that register in order to be considered. This prunes out
  // unprofitable searching.
  SmallSetVector<const SCEV *, 4> ReqRegs;
  for (const SCEV *S : CurRegs)
    if (LU.Regs.count(S))
      ReqRegs.insert(S);

  SmallPtrSet<const SCEV *, 16> NewRegs;
  Cost NewCost;
  for (const Formula &F : LU.Formulae) {
    // Ignore formulae which may not be ideal in terms of register reuse of
    // ReqRegs. The formula should use all required registers before
    // introducing new ones.
    int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size());
    for (const SCEV *Reg : ReqRegs) {
      if ((F.ScaledReg && F.ScaledReg == Reg) ||
          is_contained(F.BaseRegs, Reg)) {
        --NumReqRegsToFind;
        if (NumReqRegsToFind == 0)
          break;
      }
    }
    if (NumReqRegsToFind != 0) {
      // If none of the formulae satisfied the required registers, then we could
      // clear ReqRegs and try again. Currently, we simply give up in this case.
      continue;
    }

    // Evaluate the cost of the current formula. If it's already worse than
    // the current best, prune the search at that point.
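    // (Costs only accumulate as formulae are added to the workspace, so a
    // partial solution that is already not cheaper than SolutionCost cannot
    // lead to a better complete solution; this is classic branch-and-bound
    // pruning.)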
    NewCost = CurCost;
    NewRegs = CurRegs;
    NewCost.RateFormula(TTI, F, NewRegs, VisitedRegs, L, SE, DT, LU);
    if (NewCost.isLess(SolutionCost, TTI)) {
      Workspace.push_back(&F);
      if (Workspace.size() != Uses.size()) {
        SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
                     NewRegs, VisitedRegs);
        if (F.getNumRegs() == 1 && Workspace.size() == 1)
          VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
      } else {
        LLVM_DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
                   dbgs() << ".\n Regs:";
                   for (const SCEV *S : NewRegs)
                     dbgs() << ' ' << *S;
                   dbgs() << '\n');

        SolutionCost = NewCost;
        Solution = Workspace;
      }
      Workspace.pop_back();
    }
  }
}

/// Choose one formula from each use. Return the results in the given Solution
/// vector.
void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
  SmallVector<const Formula *, 8> Workspace;
  Cost SolutionCost;
  SolutionCost.Lose();
  Cost CurCost;
  SmallPtrSet<const SCEV *, 16> CurRegs;
  DenseSet<const SCEV *> VisitedRegs;
  Workspace.reserve(Uses.size());

  // SolveRecurse does all the work.
  SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
               CurRegs, VisitedRegs);
  if (Solution.empty()) {
    LLVM_DEBUG(dbgs() << "\nNo Satisfactory Solution\n");
    return;
  }

  // Ok, we've now made all our decisions.
  LLVM_DEBUG(dbgs() << "\n"
                       "The chosen solution requires ";
             SolutionCost.print(dbgs()); dbgs() << ":\n";
             for (size_t i = 0, e = Uses.size(); i != e; ++i) {
               dbgs() << "  ";
               Uses[i].print(dbgs());
               dbgs() << "\n"
                         "    ";
               Solution[i]->print(dbgs());
               dbgs() << '\n';
             });

  assert(Solution.size() == Uses.size() && "Malformed solution!");
}

/// Helper for AdjustInsertPositionForExpand. Climb up the dominator tree as
/// far as we can go while still being dominated by the input positions. This
/// helps canonicalize the insert position, which encourages sharing.
BasicBlock::iterator
LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
                                 const SmallVectorImpl<Instruction *> &Inputs)
    const {
  Instruction *Tentative = &*IP;
  while (true) {
    bool AllDominate = true;
    Instruction *BetterPos = nullptr;
    // Don't bother attempting to insert before a catchswitch; its basic block
    // cannot have any other non-PHI instructions.
    if (isa<CatchSwitchInst>(Tentative))
      return IP;

    for (Instruction *Inst : Inputs) {
      if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
        AllDominate = false;
        break;
      }
      // Attempt to find an insert position in the middle of the block,
      // instead of at the end, so that it can be used for other expansions.
      if (Tentative->getParent() == Inst->getParent() &&
          (!BetterPos || !DT.dominates(Inst, BetterPos)))
        BetterPos = &*std::next(BasicBlock::iterator(Inst));
    }
    if (!AllDominate)
      break;
    if (BetterPos)
      IP = BetterPos->getIterator();
    else
      IP = Tentative->getIterator();

    const Loop *IPLoop = LI.getLoopFor(IP->getParent());
    unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0;

    BasicBlock *IDom;
    for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) {
      if (!Rung) return IP;
      Rung = Rung->getIDom();
      if (!Rung) return IP;
      IDom = Rung->getBlock();

      // Don't climb into a loop though.
      const Loop *IDomLoop = LI.getLoopFor(IDom);
      unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0;
      if (IDomDepth <= IPLoopDepth &&
          (IDomDepth != IPLoopDepth || IDomLoop == IPLoop))
        break;
    }

    Tentative = IDom->getTerminator();
  }

  return IP;
}

/// Determine an input position which will be dominated by the operands and
/// which will dominate the result.
BasicBlock::iterator
LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
                                           const LSRFixup &LF,
                                           const LSRUse &LU,
                                           SCEVExpander &Rewriter) const {
  // Collect some instructions which must be dominated by the
  // expanding replacement. These must be dominated by any operands that
  // will be required in the expansion.
  SmallVector<Instruction *, 4> Inputs;
  if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
    Inputs.push_back(I);
  if (LU.Kind == LSRUse::ICmpZero)
    if (Instruction *I =
            dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
      Inputs.push_back(I);
  if (LF.PostIncLoops.count(L)) {
    if (LF.isUseFullyOutsideLoop(L))
      Inputs.push_back(L->getLoopLatch()->getTerminator());
    else
      Inputs.push_back(IVIncInsertPos);
  }
  // The expansion must also be dominated by the increment positions of any
  // loops for which it is using post-inc mode.
  for (const Loop *PIL : LF.PostIncLoops) {
    if (PIL == L) continue;

    // Be dominated by the loop exit.
    SmallVector<BasicBlock *, 4> ExitingBlocks;
    PIL->getExitingBlocks(ExitingBlocks);
    if (!ExitingBlocks.empty()) {
      BasicBlock *BB = ExitingBlocks[0];
      for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i)
        BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]);
      Inputs.push_back(BB->getTerminator());
    }
  }

  assert(!isa<PHINode>(LowestIP) && !LowestIP->isEHPad()
         && !isa<DbgInfoIntrinsic>(LowestIP) &&
         "Insertion point must be a normal instruction");

  // Then, climb up the immediate dominator tree as far as we can go while
  // still being dominated by the input positions.
  BasicBlock::iterator IP = HoistInsertPosition(LowestIP, Inputs);

  // Don't insert instructions before PHI nodes.
  while (isa<PHINode>(IP)) ++IP;

  // Ignore landingpad instructions.
  while (IP->isEHPad()) ++IP;

  // Ignore debug intrinsics.
  while (isa<DbgInfoIntrinsic>(IP)) ++IP;

  // Set IP below instructions recently inserted by SCEVExpander. This keeps
  // the IP consistent across expansions and allows the previously inserted
  // instructions to be reused by subsequent expansion.
  while (Rewriter.isInsertedInstruction(&*IP) && IP != LowestIP)
    ++IP;

  return IP;
}

/// Emit instructions for the leading candidate expression for this LSRUse
/// (this is called "expanding").
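/// For example (an illustrative sketch, not output from a real run), a
/// formula such as reg(%base) + 2*reg({0,+,4}<%L>) + 16 for an address use
/// would be expanded into IR roughly like:
///
///   %scaled = mul i64 %iv, 2
///   %sum    = add i64 %base, %scaled
///   %addr   = add i64 %sum, 16
///
/// with the base registers summed first, then the scaled register times its
/// scale, and finally the immediate offsets.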
Value *LSRInstance::Expand(const LSRUse &LU, const LSRFixup &LF,
                           const Formula &F, BasicBlock::iterator IP,
                           SCEVExpander &Rewriter,
                           SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
  if (LU.RigidFormula)
    return LF.OperandValToReplace;

  // Determine an input position which will be dominated by the operands and
  // which will dominate the result.
  IP = AdjustInsertPositionForExpand(IP, LF, LU, Rewriter);
  Rewriter.setInsertPoint(&*IP);

  // Inform the Rewriter if we have a post-increment use, so that it can
  // perform an advantageous expansion.
  Rewriter.setPostInc(LF.PostIncLoops);

  // This is the type that the user actually needs.
  Type *OpTy = LF.OperandValToReplace->getType();
  // This will be the type that we'll initially expand to.
  Type *Ty = F.getType();
  if (!Ty)
    // No type known; just expand directly to the ultimate type.
    Ty = OpTy;
  else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy))
    // Expand directly to the ultimate type if it's the right size.
    Ty = OpTy;
  // This is the type to do integer arithmetic in.
  Type *IntTy = SE.getEffectiveSCEVType(Ty);

  // Build up a list of operands to add together to form the full base.
  SmallVector<const SCEV *, 8> Ops;

  // Expand the BaseRegs portion.
  for (const SCEV *Reg : F.BaseRegs) {
    assert(!Reg->isZero() && "Zero allocated in a base register!");

    // If we're expanding for a post-inc user, make the post-inc adjustment.
    Reg = denormalizeForPostIncUse(Reg, LF.PostIncLoops, SE);
    Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, nullptr)));
  }

  // Expand the ScaledReg portion.
  Value *ICmpScaledV = nullptr;
  if (F.Scale != 0) {
    const SCEV *ScaledS = F.ScaledReg;

    // If we're expanding for a post-inc user, make the post-inc adjustment.
    PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
    ScaledS = denormalizeForPostIncUse(ScaledS, Loops, SE);

    if (LU.Kind == LSRUse::ICmpZero) {
      // Expand ScaledReg as if it were part of the base regs.
      if (F.Scale == 1)
        Ops.push_back(
            SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr)));
      else {
        // An interesting way of "folding" with an icmp is to use a negated
        // scale, which we'll implement by inserting it into the other operand
        // of the icmp.
        assert(F.Scale == -1 &&
               "The only scale supported by ICmpZero uses is -1!");
        ICmpScaledV = Rewriter.expandCodeFor(ScaledS, nullptr);
      }
    } else {
      // Otherwise just expand the scaled register and an explicit scale,
      // which is expected to be matched as part of the address.

      // Flush the operand list to suppress SCEVExpander hoisting address
      // modes, unless the addressing mode will not be folded.
      if (!Ops.empty() && LU.Kind == LSRUse::Address &&
          isAMCompletelyFolded(TTI, LU, F)) {
        Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), nullptr);
        Ops.clear();
        Ops.push_back(SE.getUnknown(FullV));
      }
      ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr));
      if (F.Scale != 1)
        ScaledS =
            SE.getMulExpr(ScaledS, SE.getConstant(ScaledS->getType(), F.Scale));
      Ops.push_back(ScaledS);
    }
  }

  // Expand the GV portion.
  if (F.BaseGV) {
    // Flush the operand list to suppress SCEVExpander hoisting.
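    // (Expanding the partial sum to a concrete Value and rewrapping it as a
    // SCEVUnknown pins it at the current insert point, so the expander will
    // not reassociate it with the global and hoist the address computation.)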
    if (!Ops.empty()) {
      Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty);
      Ops.clear();
      Ops.push_back(SE.getUnknown(FullV));
    }
    Ops.push_back(SE.getUnknown(F.BaseGV));
  }

  // Flush the operand list to suppress SCEVExpander hoisting of both folded
  // and unfolded offsets. LSR assumes they both live next to their uses.
  if (!Ops.empty()) {
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }

  // Expand the immediate portion.
  int64_t Offset = (uint64_t)F.BaseOffset + LF.Offset;
  if (Offset != 0) {
    if (LU.Kind == LSRUse::ICmpZero) {
      // The other interesting way of "folding" with an ICmpZero is to use a
      // negated immediate.
      if (!ICmpScaledV)
        ICmpScaledV = ConstantInt::get(IntTy, -(uint64_t)Offset);
      else {
        Ops.push_back(SE.getUnknown(ICmpScaledV));
        ICmpScaledV = ConstantInt::get(IntTy, Offset);
      }
    } else {
      // Just add the immediate values. These again are expected to be matched
      // as part of the address.
      Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset)));
    }
  }

  // Expand the unfolded offset portion.
  int64_t UnfoldedOffset = F.UnfoldedOffset;
  if (UnfoldedOffset != 0) {
    // Just add the immediate values.
    Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy,
                                                       UnfoldedOffset)));
  }

  // Emit instructions summing all the operands.
  const SCEV *FullS = Ops.empty() ?
                      SE.getConstant(IntTy, 0) :
                      SE.getAddExpr(Ops);
  Value *FullV = Rewriter.expandCodeFor(FullS, Ty);

  // We're done expanding now, so reset the rewriter.
  Rewriter.clearPostInc();

  // An ICmpZero Formula represents an ICmp which we're handling as a
  // comparison against zero. Now that we've expanded an expression for that
  // form, update the ICmp's other operand.
  if (LU.Kind == LSRUse::ICmpZero) {
    ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
    DeadInsts.emplace_back(CI->getOperand(1));
    assert(!F.BaseGV && "ICmp does not support folding a global value and "
                        "a scale at the same time!");
    if (F.Scale == -1) {
      if (ICmpScaledV->getType() != OpTy) {
        Instruction *Cast =
          CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false,
                                                   OpTy, false),
                           ICmpScaledV, OpTy, "tmp", CI);
        ICmpScaledV = Cast;
      }
      CI->setOperand(1, ICmpScaledV);
    } else {
      // A scale of 1 means that the scale has been expanded as part of the
      // base regs.
      assert((F.Scale == 0 || F.Scale == 1) &&
             "ICmp does not support folding a global value and "
             "a scale at the same time!");
      Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
                                           -(uint64_t)Offset);
      if (C->getType() != OpTy)
        C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                          OpTy, false),
                                  C, OpTy);

      CI->setOperand(1, C);
    }
  }

  return FullV;
}

/// Helper for Rewrite. PHI nodes are special because the use of their operands
/// effectively happens in their predecessor blocks, so the expression may need
/// to be expanded in multiple places.
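/// For example, if the old value flows into the PHI from two different
/// predecessors, a separate expansion is emitted at each predecessor's
/// terminator (splitting critical edges where that is legal), and expansions
/// are cached per block so multiple incoming edges from one block share a
/// single value.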
void LSRInstance::RewriteForPHI(
    PHINode *PN, const LSRUse &LU, const LSRFixup &LF, const Formula &F,
    SCEVExpander &Rewriter, SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
  DenseMap<BasicBlock *, Value *> Inserted;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
      BasicBlock *BB = PN->getIncomingBlock(i);

      // If this is a critical edge, split the edge so that we do not insert
      // the code on all predecessor/successor paths. We do this unless this
      // is the canonical backedge for this loop, which complicates post-inc
      // users.
      if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
          !isa<IndirectBrInst>(BB->getTerminator()) &&
          !isa<CatchSwitchInst>(BB->getTerminator())) {
        BasicBlock *Parent = PN->getParent();
        Loop *PNLoop = LI.getLoopFor(Parent);
        if (!PNLoop || Parent != PNLoop->getHeader()) {
          // Split the critical edge.
          BasicBlock *NewBB = nullptr;
          if (!Parent->isLandingPad()) {
            NewBB = SplitCriticalEdge(BB, Parent,
                                      CriticalEdgeSplittingOptions(&DT, &LI)
                                          .setMergeIdenticalEdges()
                                          .setDontDeleteUselessPHIs());
          } else {
            SmallVector<BasicBlock*, 2> NewBBs;
            SplitLandingPadPredecessors(Parent, BB, "", "", NewBBs, &DT, &LI);
            NewBB = NewBBs[0];
          }
          // If NewBB==NULL, then SplitCriticalEdge refused to split because all
          // phi predecessors are identical. The simple thing to do is skip
          // splitting in this case rather than complicate the API.
          if (NewBB) {
            // If PN is outside of the loop and BB is in the loop, we want to
            // move the block to be immediately before the PHI block, not
            // immediately after BB.
            if (L->contains(BB) && !L->contains(PN))
              NewBB->moveBefore(PN->getParent());

            // Splitting the edge can reduce the number of PHI entries we have.
            e = PN->getNumIncomingValues();
            BB = NewBB;
            i = PN->getBasicBlockIndex(BB);
          }
        }
      }

      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
          Inserted.insert(std::make_pair(BB, static_cast<Value *>(nullptr)));
      if (!Pair.second)
        PN->setIncomingValue(i, Pair.first->second);
      else {
        Value *FullV = Expand(LU, LF, F, BB->getTerminator()->getIterator(),
                              Rewriter, DeadInsts);

        // If this is reuse-by-noop-cast, insert the noop cast.
        Type *OpTy = LF.OperandValToReplace->getType();
        if (FullV->getType() != OpTy)
          FullV =
              CastInst::Create(CastInst::getCastOpcode(FullV, false,
                                                       OpTy, false),
                               FullV, LF.OperandValToReplace->getType(),
                               "tmp", BB->getTerminator());

        PN->setIncomingValue(i, FullV);
        Pair.first->second = FullV;
      }
    }
}

/// Emit instructions for the leading candidate expression for this LSRUse
/// (this is called "expanding"), and update the UserInst to reference the
/// newly expanded value.
void LSRInstance::Rewrite(const LSRUse &LU, const LSRFixup &LF,
                          const Formula &F, SCEVExpander &Rewriter,
                          SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
  // First, find an insertion point that dominates UserInst. For PHI nodes,
  // find the nearest block which dominates all the relevant uses.
  if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
    RewriteForPHI(PN, LU, LF, F, Rewriter, DeadInsts);
  } else {
    Value *FullV =
        Expand(LU, LF, F, LF.UserInst->getIterator(), Rewriter, DeadInsts);

    // If this is reuse-by-noop-cast, insert the noop cast.
    Type *OpTy = LF.OperandValToReplace->getType();
    if (FullV->getType() != OpTy) {
      Instruction *Cast =
          CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
                           FullV, OpTy, "tmp", LF.UserInst);
      FullV = Cast;
    }

    // Update the user. ICmpZero is handled specially here (for now) because
    // Expand may have updated one of the operands of the icmp already, and
    // its new value may happen to be equal to LF.OperandValToReplace, in
    // which case doing replaceUsesOfWith leads to replacing both operands
    // with the same value. TODO: Reorganize this.
    if (LU.Kind == LSRUse::ICmpZero)
      LF.UserInst->setOperand(0, FullV);
    else
      LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
  }

  DeadInsts.emplace_back(LF.OperandValToReplace);
}

/// Rewrite all the fixup locations with new values, following the chosen
/// solution.
void LSRInstance::ImplementSolution(
    const SmallVectorImpl<const Formula *> &Solution) {
  // Keep track of instructions we may have made dead, so that
  // we can remove them after we are done working.
  SmallVector<WeakTrackingVH, 16> DeadInsts;

  SCEVExpander Rewriter(SE, L->getHeader()->getModule()->getDataLayout(),
                        "lsr");
#ifndef NDEBUG
  Rewriter.setDebugType(DEBUG_TYPE);
#endif
  Rewriter.disableCanonicalMode();
  Rewriter.enableLSRMode();
  Rewriter.setIVIncInsertPos(L, IVIncInsertPos);

  // Mark phi nodes that terminate chains so the expander tries to reuse them.
  for (const IVChain &Chain : IVChainVec) {
    if (PHINode *PN = dyn_cast<PHINode>(Chain.tailUserInst()))
      Rewriter.setChainedPhi(PN);
  }

  // Expand the new value definitions and update the users.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx)
    for (const LSRFixup &Fixup : Uses[LUIdx].Fixups) {
      Rewrite(Uses[LUIdx], Fixup, *Solution[LUIdx], Rewriter, DeadInsts);
      Changed = true;
    }

  for (const IVChain &Chain : IVChainVec) {
    GenerateIVChain(Chain, Rewriter, DeadInsts);
    Changed = true;
  }
  // Clean up after ourselves. This must be done before deleting any
  // instructions.
  Rewriter.clear();

  Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
}

LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE,
                         DominatorTree &DT, LoopInfo &LI,
                         const TargetTransformInfo &TTI)
    : IU(IU), SE(SE), DT(DT), LI(LI), TTI(TTI), L(L) {
  // If LoopSimplify form is not available, stay out of trouble.
  if (!L->isLoopSimplifyForm())
    return;

  // If there's no interesting work to be done, bail early.
  if (IU.empty()) return;

  // If there's too much analysis to be done, bail early. We won't be able to
  // model the problem anyway.
  unsigned NumUsers = 0;
  for (const IVStrideUse &U : IU) {
    if (++NumUsers > MaxIVUsers) {
      (void)U;
      LLVM_DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << U
                        << "\n");
      return;
    }
    // Bail out if we have a PHI on an EHPad that gets a value from a
    // CatchSwitchInst. Because the CatchSwitchInst cannot be split, there is
    // no good place to stick any instructions.
    if (auto *PN = dyn_cast<PHINode>(U.getUser())) {
      auto *FirstNonPHI = PN->getParent()->getFirstNonPHI();
      if (isa<FuncletPadInst>(FirstNonPHI) ||
          isa<CatchSwitchInst>(FirstNonPHI))
        for (BasicBlock *PredBB : PN->blocks())
          if (isa<CatchSwitchInst>(PredBB->getFirstNonPHI()))
            return;
    }
  }

#ifndef NDEBUG
  // All dominating loops must have preheaders, or SCEVExpander may not be able
  // to materialize an AddRecExpr whose Start is an outer AddRecExpr.
  //
  // IVUsers analysis should only create users that are dominated by simple loop
  // headers. Since this loop should dominate all of its users, its user list
  // should be empty if this loop itself is not within a simple loop nest.
  for (DomTreeNode *Rung = DT.getNode(L->getLoopPreheader());
       Rung; Rung = Rung->getIDom()) {
    BasicBlock *BB = Rung->getBlock();
    const Loop *DomLoop = LI.getLoopFor(BB);
    if (DomLoop && DomLoop->getHeader() == BB) {
      assert(DomLoop->getLoopPreheader() && "LSR needs a simplified loop nest");
    }
  }
#endif // NDEBUG

  LLVM_DEBUG(dbgs() << "\nLSR on loop ";
             L->getHeader()->printAsOperand(dbgs(), /*PrintType=*/false);
             dbgs() << ":\n");

  // First, perform some low-level loop optimizations.
  OptimizeShadowIV();
  OptimizeLoopTermCond();

  // If loop preparation eliminates all interesting IV users, bail.
  if (IU.empty()) return;

  // Skip nested loops until we can model them better with formulae.
  if (!L->empty()) {
    LLVM_DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n");
    return;
  }

  // Start collecting data and preparing for the solver.
  CollectChains();
  CollectInterestingTypesAndFactors();
  CollectFixupsAndInitialFormulae();
  CollectLoopInvariantFixupsAndFormulae();

  if (Uses.empty())
    return;

  LLVM_DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
             print_uses(dbgs()));

  // Now use the reuse data to generate a bunch of interesting ways
  // to formulate the values needed for the uses.
  GenerateAllReuseFormulae();

  FilterOutUndesirableDedicatedRegisters();
  NarrowSearchSpaceUsingHeuristics();

  SmallVector<const Formula *, 8> Solution;
  Solve(Solution);

  // Release memory that is no longer needed.
  Factors.clear();
  Types.clear();
  RegUses.clear();

  if (Solution.empty())
    return;

#ifndef NDEBUG
  // Formulae should be legal.
  for (const LSRUse &LU : Uses) {
    for (const Formula &F : LU.Formulae)
      assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
                        F) && "Illegal formula generated!");
  }
#endif

  // Now that we've decided what we want, make it so.
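  // (ImplementSolution expands each use's chosen formula, rewrites the fixup
  // sites to use the new values, and queues the old, now-dead IV computations
  // for deletion.)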
  ImplementSolution(Solution);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
  if (Factors.empty() && Types.empty()) return;

  OS << "LSR has identified the following interesting factors and types: ";
  bool First = true;

  for (int64_t Factor : Factors) {
    if (!First) OS << ", ";
    First = false;
    OS << '*' << Factor;
  }

  for (Type *Ty : Types) {
    if (!First) OS << ", ";
    First = false;
    OS << '(' << *Ty << ')';
  }
  OS << '\n';
}

void LSRInstance::print_fixups(raw_ostream &OS) const {
  OS << "LSR is examining the following fixup sites:\n";
  for (const LSRUse &LU : Uses)
    for (const LSRFixup &LF : LU.Fixups) {
      OS << "  ";
      LF.print(OS);
      OS << '\n';
    }
}

void LSRInstance::print_uses(raw_ostream &OS) const {
  OS << "LSR is examining the following uses:\n";
  for (const LSRUse &LU : Uses) {
    OS << "  ";
    LU.print(OS);
    OS << '\n';
    for (const Formula &F : LU.Formulae) {
      OS << "    ";
      F.print(OS);
      OS << '\n';
    }
  }
}

void LSRInstance::print(raw_ostream &OS) const {
  print_factors_and_types(OS);
  print_fixups(OS);
  print_uses(OS);
}

LLVM_DUMP_METHOD void LSRInstance::dump() const {
  print(errs()); errs() << '\n';
}
#endif

namespace {

class LoopStrengthReduce : public LoopPass {
public:
  static char ID; // Pass ID, replacement for typeid

  LoopStrengthReduce();

private:
  bool runOnLoop(Loop *L, LPPassManager &LPM) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

} // end anonymous namespace

LoopStrengthReduce::LoopStrengthReduce() : LoopPass(ID) {
  initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry());
}

void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
  // We split critical edges, so we change the CFG. However, we do update
  // many analyses if they are around.
  AU.addPreservedID(LoopSimplifyID);

  AU.addRequired<LoopInfoWrapperPass>();
  AU.addPreserved<LoopInfoWrapperPass>();
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addRequired<ScalarEvolutionWrapperPass>();
  AU.addPreserved<ScalarEvolutionWrapperPass>();
  // Requiring LoopSimplify a second time here prevents IVUsers from running
  // twice, since LoopSimplify was invalidated by running ScalarEvolution.
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<IVUsersWrapperPass>();
  AU.addPreserved<IVUsersWrapperPass>();
  AU.addRequired<TargetTransformInfoWrapperPass>();
}

static bool ReduceLoopStrength(Loop *L, IVUsers &IU, ScalarEvolution &SE,
                               DominatorTree &DT, LoopInfo &LI,
                               const TargetTransformInfo &TTI) {
  bool Changed = false;

  // Run the main LSR transformation.
  Changed |= LSRInstance(L, IU, SE, DT, LI, TTI).getChanged();

  // Remove any extra phis created by processing inner loops.
  Changed |= DeleteDeadPHIs(L->getHeader());
  if (EnablePhiElim && L->isLoopSimplifyForm()) {
    SmallVector<WeakTrackingVH, 16> DeadInsts;
    const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
    SCEVExpander Rewriter(SE, DL, "lsr");
#ifndef NDEBUG
    Rewriter.setDebugType(DEBUG_TYPE);
#endif
    unsigned numFolded = Rewriter.replaceCongruentIVs(L, &DT, DeadInsts, &TTI);
    if (numFolded) {
      Changed = true;
      DeleteTriviallyDeadInstructions(DeadInsts);
      DeleteDeadPHIs(L->getHeader());
    }
  }
  return Changed;
}

bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
  if (skipLoop(L))
    return false;

  auto &IU = getAnalysis<IVUsersWrapperPass>().getIU();
  auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  const auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
      *L->getHeader()->getParent());
  return ReduceLoopStrength(L, IU, SE, DT, LI, TTI);
}

PreservedAnalyses LoopStrengthReducePass::run(Loop &L, LoopAnalysisManager &AM,
                                              LoopStandardAnalysisResults &AR,
                                              LPMUpdater &) {
  if (!ReduceLoopStrength(&L, AM.getResult<IVUsersAnalysis>(L, AR), AR.SE,
                          AR.DT, AR.LI, AR.TTI))
    return PreservedAnalyses::all();

  return getLoopPassPreservedAnalyses();
}

char LoopStrengthReduce::ID = 0;

INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
                      "Loop Strength Reduction", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(IVUsersWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
                    "Loop Strength Reduction", false, false)

Pass *llvm::createLoopStrengthReducePass() { return new LoopStrengthReduce(); }