//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops
// that have as one or more of their components the loop induction variable;
// it rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
// it's useful to think about these as the same register, with some uses using
// the value of the register before the add and some using it after. In this
// example, the icmp is a post-increment user, since it uses %i.next, which is
// the value of the induction variable after the increment. The other common
// case of post-increment users is users outside the loop.
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should the addressing mode BaseGV be changed to a ConstantExpr instead
//       of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
//       smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
//       multiple base registers, or as the increment expression in an addrec),
//       we may not actually need both reg and (-1 * reg) in registers; the
//       negation can be implemented by using a sub instead of an add. The
//       lack of support for taking this into consideration when making
//       register pressure decisions is partly worked around by the "Special"
//       use kind.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
using namespace llvm;

/// MaxIVUsers is an arbitrary threshold that provides an early opportunity to
/// bail out. This threshold is far beyond the number of users that LSR can
/// conceivably solve, so it should not affect generated code, but catches the
/// worst cases before LSR burns too much compile time and stack space.
static const unsigned MaxIVUsers = 200;

// Temporary flag to clean up congruent phis after LSR phi expansion.
// It's currently disabled until we can determine whether it's truly useful or
// not. The flag should be removed after the v3.0 release.
// This is now needed for ivchains.
static cl::opt<bool> EnablePhiElim(
  "enable-lsr-phielim", cl::Hidden, cl::init(true),
  cl::desc("Enable LSR phi elimination"));

#ifndef NDEBUG
// Stress test IV chain generation.
static cl::opt<bool> StressIVChain(
  "stress-ivchain", cl::Hidden, cl::init(false),
  cl::desc("Stress test LSR IV chains"));
#else
static bool StressIVChain = false;
#endif

namespace {

/// RegSortData - This class holds data which is used to order reuse
/// candidates.
class RegSortData {
public:
  /// UsedByIndices - This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  RegSortData() {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}
#endif

namespace {

/// RegUseTracker - Map register candidates to information about how they are
/// used.
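/// The tracker also keeps the candidates in a side vector (RegSequence) so
/// that iteration over them follows a deterministic insertion order rather
/// than DenseMap order.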
class RegUseTracker {
  typedef DenseMap<const SCEV *, RegSortData> RegUsesTy;

  RegUsesTy RegUsesMap;
  SmallVector<const SCEV *, 16> RegSequence;

public:
  void CountRegister(const SCEV *Reg, size_t LUIdx);
  void DropRegister(const SCEV *Reg, size_t LUIdx);
  void SwapAndDropUse(size_t LUIdx, size_t LastLUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;

  void clear();

  typedef SmallVectorImpl<const SCEV *>::iterator iterator;
  typedef SmallVectorImpl<const SCEV *>::const_iterator const_iterator;
  iterator begin() { return RegSequence.begin(); }
  iterator end()   { return RegSequence.end(); }
  const_iterator begin() const { return RegSequence.begin(); }
  const_iterator end() const   { return RegSequence.end(); }
};

}

void
RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair =
    RegUsesMap.insert(std::make_pair(Reg, RegSortData()));
  RegSortData &RSD = Pair.first->second;
  if (Pair.second)
    RegSequence.push_back(Reg);
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);
}

void
RegUseTracker::DropRegister(const SCEV *Reg, size_t LUIdx) {
  RegUsesTy::iterator It = RegUsesMap.find(Reg);
  assert(It != RegUsesMap.end());
  RegSortData &RSD = It->second;
  assert(RSD.UsedByIndices.size() > LUIdx);
  RSD.UsedByIndices.reset(LUIdx);
}

void
RegUseTracker::SwapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
  assert(LUIdx <= LastLUIdx);

  // Update RegUses. The data structure is not optimized for this purpose;
  // we must iterate through it and update each of the bit vectors.
  for (RegUsesTy::iterator I = RegUsesMap.begin(), E = RegUsesMap.end();
       I != E; ++I) {
    SmallBitVector &UsedByIndices = I->second.UsedByIndices;
    if (LUIdx < UsedByIndices.size())
      UsedByIndices[LUIdx] =
        LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : 0;
    UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx));
  }
}

bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  if (I == RegUsesMap.end())
    return false;
  const SmallBitVector &UsedByIndices = I->second.UsedByIndices;
  int i = UsedByIndices.find_first();
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;
  return UsedByIndices.find_next(i) != -1;
}

const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  assert(I != RegUsesMap.end() && "Unknown register!");
  return I->second.UsedByIndices;
}

void RegUseTracker::clear() {
  RegUsesMap.clear();
  RegSequence.clear();
}

namespace {

/// Formula - This class holds information that describes a formula for
/// computing a value that satisfies a use. It may include broken-out
/// immediates and scaled registers.
struct Formula {
  /// Global base address used for complex addressing.
  GlobalValue *BaseGV;

  /// Base offset for complex addressing.
  int64_t BaseOffset;

  /// Whether any complex addressing has a base register.
  bool HasBaseReg;

  /// The scale of any complex addressing.
  int64_t Scale;

  /// BaseRegs - The list of "base" registers for this use. When this is
  /// non-empty,
  SmallVector<const SCEV *, 2> BaseRegs;

  /// ScaledReg - The 'scaled' register for this use. This should be non-null
  /// when Scale is not zero.
  const SCEV *ScaledReg;

  /// UnfoldedOffset - An additional constant offset which is added near the
  /// use. This requires a temporary register, but the offset itself can
  /// live in an add immediate field rather than a register.
  int64_t UnfoldedOffset;

  Formula()
    : BaseGV(0), BaseOffset(0), HasBaseReg(false), Scale(0), ScaledReg(0),
      UnfoldedOffset(0) {}

  void InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);

  unsigned getNumRegs() const;
  Type *getType() const;

  void DeleteBaseReg(const SCEV *&S);

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// DoInitialMatch - Recursion helper for InitialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE) {
  // Collect expressions which properly dominate the loop header.
  if (SE.properlyDominates(S, L->getHeader())) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      DoInitialMatch(*I, L, Good, Bad, SE);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
      DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                      AR->getStepRecurrence(SE),
                                      // FIXME: AR->getNoWrapFlags()
                                      AR->getLoop(), SCEV::FlagAnyWrap),
                     L, Good, Bad, SE);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getOperand(0)->isAllOnesValue()) {
      SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end());
      const SCEV *NewMul = SE.getMulExpr(Ops);

      SmallVector<const SCEV *, 4> MyGood;
      SmallVector<const SCEV *, 4> MyBad;
      DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
      const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
        SE.getEffectiveSCEVType(NewMul->getType())));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyGood.begin(),
           E = MyGood.end(); I != E; ++I)
        Good.push_back(SE.getMulExpr(NegOne, *I));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyBad.begin(),
           E = MyBad.end(); I != E; ++I)
        Bad.push_back(SE.getMulExpr(NegOne, *I));
      return;
    }

  // Ok, we can't do anything interesting. Just stuff the whole thing into a
  // register and hope for the best.
  Bad.push_back(S);
}

/// InitialMatch - Incorporate loop-variant parts of S into this Formula,
/// attempting to keep all loop-invariant and loop-computable values in a
/// single base register.
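///
/// For example, given the expression (%a + {0,+,8}<%L>) where %a is
/// loop-invariant, the invariant part %a lands in one base register and the
/// loop-computable part {0,+,8}<%L> in another.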
void Formula::InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
  SmallVector<const SCEV *, 4> Good;
  SmallVector<const SCEV *, 4> Bad;
  DoInitialMatch(S, L, Good, Bad, SE);
  if (!Good.empty()) {
    const SCEV *Sum = SE.getAddExpr(Good);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    HasBaseReg = true;
  }
  if (!Bad.empty()) {
    const SCEV *Sum = SE.getAddExpr(Bad);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    HasBaseReg = true;
  }
}

/// getNumRegs - Return the total number of register operands used by this
/// formula. This does not include register uses implied by non-constant
/// addrec strides.
unsigned Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}

/// getType - Return the type of this formula, if it has one, or null
/// otherwise. This type is meaningless except for the bit size.
Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         BaseGV ? BaseGV->getType() :
         0;
}

/// DeleteBaseReg - Delete the given base reg from the BaseRegs list.
void Formula::DeleteBaseReg(const SCEV *&S) {
  if (&S != &BaseRegs.back())
    std::swap(S, BaseRegs.back());
  BaseRegs.pop_back();
}

/// referencesReg - Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg ||
         std::find(BaseRegs.begin(), BaseRegs.end(), S) != BaseRegs.end();
}

/// hasRegsUsedByUsesOtherThan - Test whether this formula uses registers
/// which are used by uses other than the use with the given index.
bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
  if (ScaledReg)
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
      return true;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I)
    if (RegUses.isRegUsedByUsesOtherThan(*I, LUIdx))
      return true;
  return false;
}

void Formula::print(raw_ostream &OS) const {
  bool First = true;
  if (BaseGV) {
    if (!First) OS << " + "; else First = false;
    WriteAsOperand(OS, BaseGV, /*PrintType=*/false);
  }
  if (BaseOffset != 0) {
    if (!First) OS << " + "; else First = false;
    OS << BaseOffset;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I) {
    if (!First) OS << " + "; else First = false;
    OS << "reg(" << **I << ')';
  }
  if (HasBaseReg && BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: HasBaseReg**";
  } else if (!HasBaseReg && !BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: !HasBaseReg**";
  }
  if (Scale != 0) {
    if (!First) OS << " + "; else First = false;
    OS << Scale << "*reg(";
    if (ScaledReg)
      OS << *ScaledReg;
    else
      OS << "<unknown>";
    OS << ')';
  }
  if (UnfoldedOffset != 0) {
    if (!First) OS << " + "; else First = false;
    OS << "imm(" << UnfoldedOffset << ')';
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Formula::dump() const {
  print(errs()); errs() << '\n';
}
#endif

/// isAddRecSExtable - Return true if the given addrec can be sign-extended
/// without changing its value.
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1);
  return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}

/// isAddSExtable - Return true if the given add can be sign-extended
/// without changing its value.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1);
  return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}

/// isMulSExtable - Return true if the given mul can be sign-extended
/// without changing its value.
static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(),
                     SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
  return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
}

/// getExactSDiv - Return an expression for LHS /s RHS, if it can be determined
/// and if the remainder is known to be zero, or null otherwise. If
/// IgnoreSignificantBits is true, expressions like (X * Y) /s Y are simplified
/// to X, ignoring that the multiplication may overflow, which is useful when
/// the result will be used in a context where the most significant bits are
/// ignored.
static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
                                ScalarEvolution &SE,
                                bool IgnoreSignificantBits = false) {
  // Handle the trivial case, which works for any SCEV type.
  if (LHS == RHS)
    return SE.getConstant(LHS->getType(), 1);

  // Handle a few RHS special cases.
  const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
  if (RC) {
    const APInt &RA = RC->getValue()->getValue();
    // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
    // some folding.
    if (RA.isAllOnesValue())
      return SE.getMulExpr(LHS, RC);
    // Handle x /s 1 as x.
    if (RA == 1)
      return LHS;
  }

  // Check for a division of a constant by a constant.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
    if (!RC)
      return 0;
    const APInt &LA = C->getValue()->getValue();
    const APInt &RA = RC->getValue()->getValue();
    if (LA.srem(RA) != 0)
      return 0;
    return SE.getConstant(LA.sdiv(RA));
  }

  // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) {
      const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
                                      IgnoreSignificantBits);
      if (!Step) return 0;
      const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
                                       IgnoreSignificantBits);
      if (!Start) return 0;
      // FlagNW is independent of the start value, step direction, and is
      // preserved with smaller magnitude steps.
      // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
      return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap);
    }
    return 0;
  }

  // Distribute the sdiv over add operands, if the add doesn't overflow.
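  // For example, ((8 * %x) + 16) /s 8 becomes (%x + 2), provided the add is
  // known not to change value under sign extension.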
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
      SmallVector<const SCEV *, 8> Ops;
      for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
           I != E; ++I) {
        const SCEV *Op = getExactSDiv(*I, RHS, SE,
                                      IgnoreSignificantBits);
        if (!Op) return 0;
        Ops.push_back(Op);
      }
      return SE.getAddExpr(Ops);
    }
    return 0;
  }

  // Check for a multiply operand that we can pull RHS out of.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) {
    if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
      SmallVector<const SCEV *, 4> Ops;
      bool Found = false;
      for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end();
           I != E; ++I) {
        const SCEV *S = *I;
        if (!Found)
          if (const SCEV *Q = getExactSDiv(S, RHS, SE,
                                           IgnoreSignificantBits)) {
            S = Q;
            Found = true;
          }
        Ops.push_back(S);
      }
      return Found ? SE.getMulExpr(Ops) : 0;
    }
    return 0;
  }

  // Otherwise we don't know.
  return 0;
}

/// ExtractImmediate - If S involves the addition of a constant integer value,
/// return that integer value, and mutate S to point to a new SCEV with that
/// value excluded.
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    if (C->getValue()->getValue().getMinSignedBits() <= 64) {
      S = SE.getConstant(C->getType(), 0);
      return C->getValue()->getSExtValue();
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return 0;
}

/// ExtractSymbol - If S involves the addition of a GlobalValue address,
/// return that symbol, and mutate S to point to a new SCEV with that
/// value excluded.
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
      S = SE.getConstant(GV->getType(), 0);
      return GV;
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
    if (Result)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
    if (Result)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return 0;
}

/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
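///
/// For example, in "store i32 %v, i32* %p", %p is an address use of the store
/// while %v is not.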
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::prefetch:
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      if (II->getArgOperand(0) == OperandVal)
        isAddress = true;
      break;
    }
  }
  return isAddress;
}

/// getAccessType - Return the type of the memory being accessed.
static Type *getAccessType(const Instruction *Inst) {
  Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getArgOperand(0)->getType();
      break;
    }
  }

  // All pointers have the same requirements, so canonicalize them to an
  // arbitrary pointer type to minimize variation.
  if (PointerType *PTy = dyn_cast<PointerType>(AccessTy))
    AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
                                PTy->getAddressSpace());

  return AccessTy;
}

/// isExistingPhi - Return true if this AddRec is already a phi in its loop.
static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin();
       PHINode *PN = dyn_cast<PHINode>(I); ++I) {
    if (SE.isSCEVable(PN->getType()) &&
        (SE.getEffectiveSCEVType(PN->getType()) ==
         SE.getEffectiveSCEVType(AR->getType())) &&
        SE.getSCEV(PN) == AR)
      return true;
  }
  return false;
}

/// Check if expanding this expression is likely to incur significant cost.
/// This is tricky because SCEV doesn't track which expressions are actually
/// computed by the current IR.
///
/// We currently allow expansion of IV increments that involve adds,
/// multiplication by constants, and AddRecs from existing phis.
///
/// TODO: Allow UDivExpr if we can find an existing IV increment that is an
/// obvious multiple of the UDivExpr.
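///
/// For example, (4 * %n) is treated as cheap (a multiply by a constant),
/// while (%a /u %b) is treated as high cost because no existing computation
/// is reused.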
static bool isHighCostExpansion(const SCEV *S,
                                SmallPtrSet<const SCEV*, 8> &Processed,
                                ScalarEvolution &SE) {
  // Zero/One operand expressions
  switch (S->getSCEVType()) {
  case scUnknown:
  case scConstant:
    return false;
  case scTruncate:
    return isHighCostExpansion(cast<SCEVTruncateExpr>(S)->getOperand(),
                               Processed, SE);
  case scZeroExtend:
    return isHighCostExpansion(cast<SCEVZeroExtendExpr>(S)->getOperand(),
                               Processed, SE);
  case scSignExtend:
    return isHighCostExpansion(cast<SCEVSignExtendExpr>(S)->getOperand(),
                               Processed, SE);
  }

  if (!Processed.insert(S))
    return false;

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I) {
      if (isHighCostExpansion(*I, Processed, SE))
        return true;
    }
    return false;
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    if (Mul->getNumOperands() == 2) {
      // Multiplication by a constant is ok
      if (isa<SCEVConstant>(Mul->getOperand(0)))
        return isHighCostExpansion(Mul->getOperand(1), Processed, SE);

      // If we have the value of one operand, check if an existing
      // multiplication already generates this expression.
      if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Mul->getOperand(1))) {
        Value *UVal = U->getValue();
        for (Value::use_iterator UI = UVal->use_begin(), UE = UVal->use_end();
             UI != UE; ++UI) {
          // If U is a constant, it may be used by a ConstantExpr.
          Instruction *User = dyn_cast<Instruction>(*UI);
          if (User && User->getOpcode() == Instruction::Mul
              && SE.isSCEVable(User->getType())) {
            return SE.getSCEV(User) == Mul;
          }
        }
      }
    }
  }

  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    if (isExistingPhi(AR, SE))
      return false;
  }

  // For now, consider any other type of expression (div/mul/min/max) high
  // cost.
  return true;
}

/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
static bool
DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
  bool Changed = false;

  while (!DeadInsts.empty()) {
    Value *V = DeadInsts.pop_back_val();
    Instruction *I = dyn_cast_or_null<Instruction>(V);

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
    Changed = true;
  }

  return Changed;
}

namespace {

/// Cost - This class is used to measure and compare candidate formulae.
class Cost {
  /// TODO: Some of these could be merged. Also, a lexical ordering
  /// isn't always optimal.
  unsigned NumRegs;
  unsigned AddRecCost;
  unsigned NumIVMuls;
  unsigned NumBaseAdds;
  unsigned ImmCost;
  unsigned SetupCost;

public:
  Cost()
    : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0),
      SetupCost(0) {}

  bool operator<(const Cost &Other) const;

  void Loose();

#ifndef NDEBUG
  // Once any of the metrics loses, they must all remain losers.
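  // That is, either none of the metrics is ~0u, or Loose() has been called
  // and all of them are.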
  bool isValid() {
    return ((NumRegs | AddRecCost | NumIVMuls | NumBaseAdds
             | ImmCost | SetupCost) != ~0u)
      || ((NumRegs & AddRecCost & NumIVMuls & NumBaseAdds
           & ImmCost & SetupCost) == ~0u);
  }
#endif

  bool isLoser() {
    assert(isValid() && "invalid cost");
    return NumRegs == ~0u;
  }

  void RateFormula(const Formula &F,
                   SmallPtrSet<const SCEV *, 16> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs,
                   const Loop *L,
                   const SmallVectorImpl<int64_t> &Offsets,
                   ScalarEvolution &SE, DominatorTree &DT,
                   SmallPtrSet<const SCEV *, 16> *LoserRegs = 0);

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  void RateRegister(const SCEV *Reg,
                    SmallPtrSet<const SCEV *, 16> &Regs,
                    const Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);
  void RatePrimaryRegister(const SCEV *Reg,
                           SmallPtrSet<const SCEV *, 16> &Regs,
                           const Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT,
                           SmallPtrSet<const SCEV *, 16> *LoserRegs);
};

}

/// RateRegister - Tally up interesting quantities from the given register.
void Cost::RateRegister(const SCEV *Reg,
                        SmallPtrSet<const SCEV *, 16> &Regs,
                        const Loop *L,
                        ScalarEvolution &SE, DominatorTree &DT) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    // If this is an addrec for another loop, don't second-guess its addrec phi
    // nodes. LSR isn't currently smart enough to reason about more than one
    // loop at a time. LSR has already run on inner loops, will not run on outer
    // loops, and cannot be expected to change sibling loops.
    if (AR->getLoop() != L) {
      // If the AddRec exists, consider its register free and leave it alone.
      if (isExistingPhi(AR, SE))
        return;

      // Otherwise, do not consider this formula at all.
      Loose();
      return;
    }
    AddRecCost += 1; /// TODO: This should be a function of the stride.

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1))) {
      if (!Regs.count(AR->getOperand(1))) {
        RateRegister(AR->getOperand(1), Regs, L, SE, DT);
        if (isLoser())
          return;
      }
    }
  }
  ++NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
  if (!isa<SCEVUnknown>(Reg) &&
      !isa<SCEVConstant>(Reg) &&
      !(isa<SCEVAddRecExpr>(Reg) &&
        (isa<SCEVUnknown>(cast<SCEVAddRecExpr>(Reg)->getStart()) ||
         isa<SCEVConstant>(cast<SCEVAddRecExpr>(Reg)->getStart()))))
    ++SetupCost;

  NumIVMuls += isa<SCEVMulExpr>(Reg) &&
    SE.hasComputableLoopEvolution(Reg, L);
}

/// RatePrimaryRegister - Record this register in the set. If we haven't seen
/// it before, rate it. Optional LoserRegs provides a way to declare any
/// formula that refers to one of those regs an instant loser.
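///
/// If Reg already appears in LoserRegs, the cost is immediately set to a
/// losing value and the register is not re-rated.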
void Cost::RatePrimaryRegister(const SCEV *Reg,
                               SmallPtrSet<const SCEV *, 16> &Regs,
                               const Loop *L,
                               ScalarEvolution &SE, DominatorTree &DT,
                               SmallPtrSet<const SCEV *, 16> *LoserRegs) {
  if (LoserRegs && LoserRegs->count(Reg)) {
    Loose();
    return;
  }
  if (Regs.insert(Reg)) {
    RateRegister(Reg, Regs, L, SE, DT);
    if (LoserRegs && isLoser())
      LoserRegs->insert(Reg);
  }
}

void Cost::RateFormula(const Formula &F,
                       SmallPtrSet<const SCEV *, 16> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const Loop *L,
                       const SmallVectorImpl<int64_t> &Offsets,
                       ScalarEvolution &SE, DominatorTree &DT,
                       SmallPtrSet<const SCEV *, 16> *LoserRegs) {
  // Tally up the registers.
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(ScaledReg, Regs, L, SE, DT, LoserRegs);
    if (isLoser())
      return;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (VisitedRegs.count(BaseReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(BaseReg, Regs, L, SE, DT, LoserRegs);
    if (isLoser())
      return;
  }

  // Determine how many (unfolded) adds we'll need inside the loop.
  size_t NumBaseParts = F.BaseRegs.size() + (F.UnfoldedOffset != 0);
  if (NumBaseParts > 1)
    NumBaseAdds += NumBaseParts - 1;

  // Tally up the non-zero immediates.
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    int64_t Offset = (uint64_t)*I + F.BaseOffset;
    if (F.BaseGV)
      ImmCost += 64; // Handle symbolic values conservatively.
                     // TODO: This should probably be the pointer size.
    else if (Offset != 0)
      ImmCost += APInt(64, Offset, true).getMinSignedBits();
  }
  assert(isValid() && "invalid cost");
}

/// Loose - Set this cost to a losing value.
void Cost::Loose() {
  NumRegs = ~0u;
  AddRecCost = ~0u;
  NumIVMuls = ~0u;
  NumBaseAdds = ~0u;
  ImmCost = ~0u;
  SetupCost = ~0u;
}

/// operator< - Choose the lower cost.
bool Cost::operator<(const Cost &Other) const {
  if (NumRegs != Other.NumRegs)
    return NumRegs < Other.NumRegs;
  if (AddRecCost != Other.AddRecCost)
    return AddRecCost < Other.AddRecCost;
  if (NumIVMuls != Other.NumIVMuls)
    return NumIVMuls < Other.NumIVMuls;
  if (NumBaseAdds != Other.NumBaseAdds)
    return NumBaseAdds < Other.NumBaseAdds;
  if (ImmCost != Other.ImmCost)
    return ImmCost < Other.ImmCost;
  if (SetupCost != Other.SetupCost)
    return SetupCost < Other.SetupCost;
  return false;
}

void Cost::print(raw_ostream &OS) const {
  OS << NumRegs << " reg" << (NumRegs == 1 ? "" : "s");
  if (AddRecCost != 0)
    OS << ", with addrec cost " << AddRecCost;
  if (NumIVMuls != 0)
    OS << ", plus " << NumIVMuls << " IV mul" << (NumIVMuls == 1 ? "" : "s");
  if (NumBaseAdds != 0)
    OS << ", plus " << NumBaseAdds << " base add"
       << (NumBaseAdds == 1 ? "" : "s");
"" : "s"); 985 if (ImmCost != 0) 986 OS << ", plus " << ImmCost << " imm cost"; 987 if (SetupCost != 0) 988 OS << ", plus " << SetupCost << " setup cost"; 989 } 990 991 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 992 void Cost::dump() const { 993 print(errs()); errs() << '\n'; 994 } 995 #endif 996 997 namespace { 998 999 /// LSRFixup - An operand value in an instruction which is to be replaced 1000 /// with some equivalent, possibly strength-reduced, replacement. 1001 struct LSRFixup { 1002 /// UserInst - The instruction which will be updated. 1003 Instruction *UserInst; 1004 1005 /// OperandValToReplace - The operand of the instruction which will 1006 /// be replaced. The operand may be used more than once; every instance 1007 /// will be replaced. 1008 Value *OperandValToReplace; 1009 1010 /// PostIncLoops - If this user is to use the post-incremented value of an 1011 /// induction variable, this variable is non-null and holds the loop 1012 /// associated with the induction variable. 1013 PostIncLoopSet PostIncLoops; 1014 1015 /// LUIdx - The index of the LSRUse describing the expression which 1016 /// this fixup needs, minus an offset (below). 1017 size_t LUIdx; 1018 1019 /// Offset - A constant offset to be added to the LSRUse expression. 1020 /// This allows multiple fixups to share the same LSRUse with different 1021 /// offsets, for example in an unrolled loop. 1022 int64_t Offset; 1023 1024 bool isUseFullyOutsideLoop(const Loop *L) const; 1025 1026 LSRFixup(); 1027 1028 void print(raw_ostream &OS) const; 1029 void dump() const; 1030 }; 1031 1032 } 1033 1034 LSRFixup::LSRFixup() 1035 : UserInst(0), OperandValToReplace(0), LUIdx(~size_t(0)), Offset(0) {} 1036 1037 /// isUseFullyOutsideLoop - Test whether this fixup always uses its 1038 /// value outside of the given loop. 1039 bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const { 1040 // PHI nodes use their value in their incoming blocks. 1041 if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) { 1042 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 1043 if (PN->getIncomingValue(i) == OperandValToReplace && 1044 L->contains(PN->getIncomingBlock(i))) 1045 return false; 1046 return true; 1047 } 1048 1049 return !L->contains(UserInst); 1050 } 1051 1052 void LSRFixup::print(raw_ostream &OS) const { 1053 OS << "UserInst="; 1054 // Store is common and interesting enough to be worth special-casing. 1055 if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) { 1056 OS << "store "; 1057 WriteAsOperand(OS, Store->getOperand(0), /*PrintType=*/false); 1058 } else if (UserInst->getType()->isVoidTy()) 1059 OS << UserInst->getOpcodeName(); 1060 else 1061 WriteAsOperand(OS, UserInst, /*PrintType=*/false); 1062 1063 OS << ", OperandValToReplace="; 1064 WriteAsOperand(OS, OperandValToReplace, /*PrintType=*/false); 1065 1066 for (PostIncLoopSet::const_iterator I = PostIncLoops.begin(), 1067 E = PostIncLoops.end(); I != E; ++I) { 1068 OS << ", PostIncLoop="; 1069 WriteAsOperand(OS, (*I)->getHeader(), /*PrintType=*/false); 1070 } 1071 1072 if (LUIdx != ~size_t(0)) 1073 OS << ", LUIdx=" << LUIdx; 1074 1075 if (Offset != 0) 1076 OS << ", Offset=" << Offset; 1077 } 1078 1079 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1080 void LSRFixup::dump() const { 1081 print(errs()); errs() << '\n'; 1082 } 1083 #endif 1084 1085 namespace { 1086 1087 /// UniquifierDenseMapInfo - A DenseMapInfo implementation for holding 1088 /// DenseMaps and DenseSets of sorted SmallVectors of const SCEV*. 
struct UniquifierDenseMapInfo {
  static SmallVector<const SCEV *, 2> getEmptyKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-1));
    return V;
  }

  static SmallVector<const SCEV *, 2> getTombstoneKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-2));
    return V;
  }

  static unsigned getHashValue(const SmallVector<const SCEV *, 2> &V) {
    unsigned Result = 0;
    for (SmallVectorImpl<const SCEV *>::const_iterator I = V.begin(),
         E = V.end(); I != E; ++I)
      Result ^= DenseMapInfo<const SCEV *>::getHashValue(*I);
    return Result;
  }

  static bool isEqual(const SmallVector<const SCEV *, 2> &LHS,
                      const SmallVector<const SCEV *, 2> &RHS) {
    return LHS == RHS;
  }
};

/// LSRUse - This class holds the state that LSR keeps for each use in
/// IVUsers, as well as uses invented by LSR itself. It includes information
/// about what kinds of things can be folded into the user, information about
/// the user itself, and information about how the use may be satisfied.
/// TODO: Represent multiple users of the same expression in common?
class LSRUse {
  DenseSet<SmallVector<const SCEV *, 2>, UniquifierDenseMapInfo> Uniquifier;

public:
  /// KindType - An enum for a kind of use, indicating what types of
  /// scaled and immediate operands it might support.
  enum KindType {
    Basic,   ///< A normal use, with no folding.
    Special, ///< A special case of basic, allowing -1 scales.
    Address, ///< An address use; folding according to TargetTransformInfo.
    ICmpZero ///< An equality icmp with both operands folded into one.
    // TODO: Add a generic icmp too?
  };

  KindType Kind;
  Type *AccessTy;

  SmallVector<int64_t, 8> Offsets;
  int64_t MinOffset;
  int64_t MaxOffset;

  /// AllFixupsOutsideLoop - This records whether all of the fixups using this
  /// LSRUse are outside of the loop, in which case some special-case heuristics
  /// may be used.
  bool AllFixupsOutsideLoop;

  /// WidestFixupType - This records the widest use type for any fixup using
  /// this LSRUse. FindUseWithSimilarFormula can't consider uses with different
  /// max fixup widths to be equivalent, because the narrower one may be relying
  /// on the implicit truncation to truncate away bogus bits.
  Type *WidestFixupType;

  /// Formulae - A list of ways to build a value that can satisfy this user.
  /// After the list is populated, one of these is selected heuristically and
  /// used to formulate a replacement for OperandValToReplace in UserInst.
  SmallVector<Formula, 12> Formulae;

  /// Regs - The set of register candidates used by all formulae in this LSRUse.
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, Type *T) : Kind(K), AccessTy(T),
                                MinOffset(INT64_MAX),
                                MaxOffset(INT64_MIN),
                                AllFixupsOutsideLoop(true),
                                WidestFixupType(0) {}

  bool HasFormulaWithSameRegs(const Formula &F) const;
  bool InsertFormula(const Formula &F);
  void DeleteFormula(Formula &F);
  void RecomputeRegs(size_t LUIdx, RegUseTracker &Reguses);

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// HasFormulaWithSameRegs - Test whether this use has a formula which has the
/// same registers as the given formula.
bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());
  return Uniquifier.count(Key);
}

/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRUse::InsertFormula(const Formula &F) {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());

  if (!Uniquifier.insert(Key).second)
    return false;

  // Using a register to hold the value of 0 is not profitable.
  assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
         "Zero allocated in a scaled register!");
#ifndef NDEBUG
  for (SmallVectorImpl<const SCEV *>::const_iterator I =
       F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I)
    assert(!(*I)->isZero() && "Zero allocated in a base register!");
#endif

  // Add the formula to the list.
  Formulae.push_back(F);

  // Record registers now being used by this use.
  Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());

  return true;
}

/// DeleteFormula - Remove the given formula from this use's list.
void LSRUse::DeleteFormula(Formula &F) {
  if (&F != &Formulae.back())
    std::swap(F, Formulae.back());
  Formulae.pop_back();
}

/// RecomputeRegs - Recompute the Regs field, and update RegUses.
void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
  // Now that we've filtered out some formulae, recompute the Regs set.
  SmallPtrSet<const SCEV *, 4> OldRegs = Regs;
  Regs.clear();
  for (SmallVectorImpl<Formula>::const_iterator I = Formulae.begin(),
       E = Formulae.end(); I != E; ++I) {
    const Formula &F = *I;
    if (F.ScaledReg) Regs.insert(F.ScaledReg);
    Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
  }

  // Update the RegTracker.
  for (SmallPtrSet<const SCEV *, 4>::iterator I = OldRegs.begin(),
       E = OldRegs.end(); I != E; ++I)
    if (!Regs.count(*I))
      RegUses.DropRegister(*I, LUIdx);
}

void LSRUse::print(raw_ostream &OS) const {
  OS << "LSR Use: Kind=";
  switch (Kind) {
  case Basic:    OS << "Basic"; break;
  case Special:  OS << "Special"; break;
  case ICmpZero: OS << "ICmpZero"; break;
  case Address:
    OS << "Address of ";
    if (AccessTy->isPointerTy())
      OS << "pointer"; // the full pointer type could be really verbose
    else
      OS << *AccessTy;
  }

  OS << ", Offsets={";
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    OS << *I;
    if (llvm::next(I) != E)
      OS << ',';
  }
  OS << '}';

  if (AllFixupsOutsideLoop)
    OS << ", all-fixups-outside-loop";

  if (WidestFixupType)
    OS << ", widest fixup type: " << *WidestFixupType;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRUse::dump() const {
  print(errs()); errs() << '\n';
}
#endif

/// isLegalUse - Test whether the use described by the given kind, access type,
/// and addressing components is "legal", meaning it can be completely folded
/// into the user instruction at isel time. This includes address-mode folding
/// and special icmp tricks.
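///
/// For example, an Address use with BaseGV == 0, BaseOffset == 16,
/// HasBaseReg == true and Scale == 4 is legal only if the target can encode
/// "base + 4*index + 16" as a single addressing mode for AccessTy.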
static bool isLegalUse(const TargetTransformInfo &TTI, LSRUse::KindType Kind,
                       Type *AccessTy, GlobalValue *BaseGV, int64_t BaseOffset,
                       bool HasBaseReg, int64_t Scale) {
  switch (Kind) {
  case LSRUse::Address:
    return TTI.isLegalAddressingMode(AccessTy, BaseGV, BaseOffset, HasBaseReg,
                                     Scale);

  case LSRUse::ICmpZero:
    // There's not even a target hook for querying whether it would be legal to
    // fold a GV into an ICmp.
    if (BaseGV)
      return false;

    // ICmp only has two operands; don't allow more than two non-trivial parts.
    if (Scale != 0 && HasBaseReg && BaseOffset != 0)
      return false;

    // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale
    // by putting the scaled register in the other operand of the icmp.
    if (Scale != 0 && Scale != -1)
      return false;

    // If we have low-level target information, ask the target if it can fold
    // an integer immediate on an icmp.
    if (BaseOffset != 0) {
      // We have one of:
      // ICmpZero     BaseReg + BaseOffset => ICmp BaseReg, -BaseOffset
      // ICmpZero -1*ScaleReg + BaseOffset => ICmp ScaleReg, BaseOffset
      // Offs is the ICmp immediate.
      if (Scale == 0)
        // The cast does the right thing with INT64_MIN.
        BaseOffset = -(uint64_t)BaseOffset;
      return TTI.isLegalICmpImmediate(BaseOffset);
    }

    // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg
    return true;

  case LSRUse::Basic:
    // Only handle single-register values.
    return !BaseGV && Scale == 0 && BaseOffset == 0;

  case LSRUse::Special:
    // Special case Basic to handle -1 scales.
    return !BaseGV && (Scale == 0 || Scale == -1) && BaseOffset == 0;
  }

  llvm_unreachable("Invalid LSRUse Kind!");
}

static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
                       int64_t MaxOffset, LSRUse::KindType Kind, Type *AccessTy,
                       GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg,
                       int64_t Scale) {
  // Check for overflow.
  if (((int64_t)((uint64_t)BaseOffset + MinOffset) > BaseOffset) !=
      (MinOffset > 0))
    return false;
  MinOffset = (uint64_t)BaseOffset + MinOffset;
  if (((int64_t)((uint64_t)BaseOffset + MaxOffset) > BaseOffset) !=
      (MaxOffset > 0))
    return false;
  MaxOffset = (uint64_t)BaseOffset + MaxOffset;

  return isLegalUse(TTI, Kind, AccessTy, BaseGV, MinOffset, HasBaseReg,
                    Scale) &&
         isLegalUse(TTI, Kind, AccessTy, BaseGV, MaxOffset, HasBaseReg, Scale);
}

static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
                       int64_t MaxOffset, LSRUse::KindType Kind, Type *AccessTy,
                       const Formula &F) {
  return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, F.BaseGV,
                    F.BaseOffset, F.HasBaseReg, F.Scale);
}

static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
                             LSRUse::KindType Kind, Type *AccessTy,
                             GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg) {
  // Fast-path: zero is always foldable.
  if (BaseOffset == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  // Canonicalize a scale of 1 to a base register if the formula doesn't
  // already have a base register.
  if (!HasBaseReg && Scale == 1) {
    Scale = 0;
    HasBaseReg = true;
  }

  return isLegalUse(TTI, Kind, AccessTy, BaseGV, BaseOffset, HasBaseReg, Scale);
}

static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
                             ScalarEvolution &SE, int64_t MinOffset,
                             int64_t MaxOffset, LSRUse::KindType Kind,
                             Type *AccessTy, const SCEV *S, bool HasBaseReg) {
  // Fast-path: zero is always foldable.
  if (S->isZero()) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t BaseOffset = ExtractImmediate(S, SE);
  GlobalValue *BaseGV = ExtractSymbol(S, SE);

  // If there's anything else involved, it's not foldable.
  if (!S->isZero()) return false;

  // Fast-path: zero is always foldable.
  if (BaseOffset == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV,
                    BaseOffset, HasBaseReg, Scale);
}

namespace {

/// UseMapDenseMapInfo - A DenseMapInfo implementation for holding
/// DenseMaps and DenseSets of pairs of const SCEV* and LSRUse::Kind.
struct UseMapDenseMapInfo {
  static std::pair<const SCEV *, LSRUse::KindType> getEmptyKey() {
    return std::make_pair(reinterpret_cast<const SCEV *>(-1), LSRUse::Basic);
  }

  static std::pair<const SCEV *, LSRUse::KindType> getTombstoneKey() {
    return std::make_pair(reinterpret_cast<const SCEV *>(-2), LSRUse::Basic);
  }

  static unsigned
  getHashValue(const std::pair<const SCEV *, LSRUse::KindType> &V) {
    unsigned Result = DenseMapInfo<const SCEV *>::getHashValue(V.first);
    Result ^= DenseMapInfo<unsigned>::getHashValue(unsigned(V.second));
    return Result;
  }

  static bool isEqual(const std::pair<const SCEV *, LSRUse::KindType> &LHS,
                      const std::pair<const SCEV *, LSRUse::KindType> &RHS) {
    return LHS == RHS;
  }
};

/// IVInc - An individual increment in a Chain of IV increments.
/// Relate an IV user to an expression that computes the IV it uses from the IV
/// used by the previous link in the Chain.
///
/// For the head of a chain, IncExpr holds the absolute SCEV expression for the
/// original IVOperand. The head of the chain's IVOperand is only valid during
/// chain collection, before LSR replaces IV users. During chain generation,
/// IncExpr can be used to find the new IVOperand that computes the same
/// expression.
struct IVInc {
  Instruction *UserInst;
  Value* IVOperand;
  const SCEV *IncExpr;

  IVInc(Instruction *U, Value *O, const SCEV *E):
    UserInst(U), IVOperand(O), IncExpr(E) {}
};

// IVChain - The list of IV increments in program order.
// We typically add the head of a chain without finding subsequent links.
struct IVChain {
  SmallVector<IVInc,1> Incs;
  const SCEV *ExprBase;

  IVChain() : ExprBase(0) {}

  IVChain(const IVInc &Head, const SCEV *Base)
    : Incs(1, Head), ExprBase(Base) {}

  typedef SmallVectorImpl<IVInc>::const_iterator const_iterator;

  // begin - return the first increment in the chain.
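  // Note: this skips Incs[0], the chain head, which records the chain's base
  // expression rather than an increment.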
  const_iterator begin() const {
    assert(!Incs.empty());
    return llvm::next(Incs.begin());
  }
  const_iterator end() const {
    return Incs.end();
  }

  // hasIncs - Returns true if this chain contains any increments.
  bool hasIncs() const { return Incs.size() >= 2; }

  // add - Add an IVInc to the end of this chain.
  void add(const IVInc &X) { Incs.push_back(X); }

  // tailUserInst - Returns the last UserInst in the chain.
  Instruction *tailUserInst() const { return Incs.back().UserInst; }

  // isProfitableIncrement - Returns true if IncExpr can be profitably added to
  // this chain.
  bool isProfitableIncrement(const SCEV *OperExpr,
                             const SCEV *IncExpr,
                             ScalarEvolution&);
};

/// ChainUsers - Helper for CollectChains to track multiple IV increment uses.
/// Distinguish between FarUsers that definitely cross IV increments and
/// NearUsers that may be used between IV increments.
struct ChainUsers {
  SmallPtrSet<Instruction*, 4> FarUsers;
  SmallPtrSet<Instruction*, 4> NearUsers;
};

/// LSRInstance - This class holds state for the main loop strength reduction
/// logic.
class LSRInstance {
  IVUsers &IU;
  ScalarEvolution &SE;
  DominatorTree &DT;
  LoopInfo &LI;
  const TargetTransformInfo &TTI;
  Loop *const L;
  bool Changed;

  /// IVIncInsertPos - This is the insert position at which the current loop's
  /// induction variable increment should be placed. In simple loops, this is
  /// the latch block's terminator. But in more complicated cases, this is a
  /// position which will dominate all the in-loop post-increment users.
  Instruction *IVIncInsertPos;

  /// Factors - Interesting factors between use strides.
  SmallSetVector<int64_t, 8> Factors;

  /// Types - Interesting use types, to facilitate truncation reuse.
  SmallSetVector<Type *, 4> Types;

  /// Fixups - The list of operands which are to be replaced.
  SmallVector<LSRFixup, 16> Fixups;

  /// Uses - The list of interesting uses.
  SmallVector<LSRUse, 16> Uses;

  /// RegUses - Track which uses use which register candidates.
  RegUseTracker RegUses;

  // Limit the number of chains to avoid quadratic behavior. We don't expect to
  // have more than a few IV increment chains in a loop. Missing a Chain falls
  // back to normal LSR behavior for those uses.
  static const unsigned MaxChains = 8;

  /// IVChainVec - IV users can form a chain of IV increments.
  SmallVector<IVChain, MaxChains> IVChainVec;

  /// IVIncSet - IV users that belong to profitable IVChains.
  SmallPtrSet<Use*, MaxChains> IVIncSet;

  void OptimizeShadowIV();
  bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
  ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
  void OptimizeLoopTermCond();

  void ChainInstruction(Instruction *UserInst, Instruction *IVOper,
                        SmallVectorImpl<ChainUsers> &ChainUsersVec);
  void FinalizeChain(IVChain &Chain);
  void CollectChains();
  void GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
                       SmallVectorImpl<WeakVH> &DeadInsts);

  void CollectInterestingTypesAndFactors();
  void CollectFixupsAndInitialFormulae();

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());
    return Fixups.back();
  }

  // Support for sharing of LSRUses between LSRFixups.
  typedef DenseMap<std::pair<const SCEV *, LSRUse::KindType>,
                   size_t,
                   UseMapDenseMapInfo> UseMapTy;
  UseMapTy UseMap;

  bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
                          LSRUse::KindType Kind, Type *AccessTy);

  std::pair<size_t, int64_t> getUse(const SCEV *&Expr,
                                    LSRUse::KindType Kind,
                                    Type *AccessTy);

  void DeleteUse(LSRUse &LU, size_t LUIdx);

  LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);

  void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void CountRegisters(const Formula &F, size_t LUIdx);
  bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);

  void CollectLoopInvariantFixupsAndFormulae();

  void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
                              unsigned Depth = 0);
  void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateCrossUseConstantOffsets();
  void GenerateAllReuseFormulae();

  void FilterOutUndesirableDedicatedRegisters();

  size_t EstimateSearchSpaceComplexity() const;
  void NarrowSearchSpaceByDetectingSupersets();
  void NarrowSearchSpaceByCollapsingUnrolledCode();
  void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  void NarrowSearchSpaceByPickingWinnerRegs();
  void NarrowSearchSpaceUsingHeuristics();

  void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                    Cost &SolutionCost,
                    SmallVectorImpl<const Formula *> &Workspace,
                    const Cost &CurCost,
                    const SmallPtrSet<const SCEV *, 16> &CurRegs,
                    DenseSet<const SCEV *> &VisitedRegs) const;
  void Solve(SmallVectorImpl<const Formula *> &Solution) const;

  BasicBlock::iterator
    HoistInsertPosition(BasicBlock::iterator IP,
                        const SmallVectorImpl<Instruction *> &Inputs) const;
  BasicBlock::iterator
    AdjustInsertPositionForExpand(BasicBlock::iterator IP,
                                  const LSRFixup &LF,
                                  const LSRUse &LU,
                                  SCEVExpander &Rewriter) const;

  Value *Expand(const LSRFixup &LF,
                const Formula &F,
                BasicBlock::iterator IP,
                SCEVExpander &Rewriter,
                SmallVectorImpl<WeakVH> &DeadInsts) const;
  void RewriteForPHI(PHINode *PN,
const LSRFixup &LF, 1628 const Formula &F, 1629 SCEVExpander &Rewriter, 1630 SmallVectorImpl<WeakVH> &DeadInsts, 1631 Pass *P) const; 1632 void Rewrite(const LSRFixup &LF, 1633 const Formula &F, 1634 SCEVExpander &Rewriter, 1635 SmallVectorImpl<WeakVH> &DeadInsts, 1636 Pass *P) const; 1637 void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution, 1638 Pass *P); 1639 1640 public: 1641 LSRInstance(Loop *L, Pass *P); 1642 1643 bool getChanged() const { return Changed; } 1644 1645 void print_factors_and_types(raw_ostream &OS) const; 1646 void print_fixups(raw_ostream &OS) const; 1647 void print_uses(raw_ostream &OS) const; 1648 void print(raw_ostream &OS) const; 1649 void dump() const; 1650 }; 1651 1652 } 1653 1654 /// OptimizeShadowIV - If IV is used in a int-to-float cast 1655 /// inside the loop then try to eliminate the cast operation. 1656 void LSRInstance::OptimizeShadowIV() { 1657 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L); 1658 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) 1659 return; 1660 1661 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); 1662 UI != E; /* empty */) { 1663 IVUsers::const_iterator CandidateUI = UI; 1664 ++UI; 1665 Instruction *ShadowUse = CandidateUI->getUser(); 1666 Type *DestTy = NULL; 1667 bool IsSigned = false; 1668 1669 /* If shadow use is a int->float cast then insert a second IV 1670 to eliminate this cast. 1671 1672 for (unsigned i = 0; i < n; ++i) 1673 foo((double)i); 1674 1675 is transformed into 1676 1677 double d = 0.0; 1678 for (unsigned i = 0; i < n; ++i, ++d) 1679 foo(d); 1680 */ 1681 if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser())) { 1682 IsSigned = false; 1683 DestTy = UCast->getDestTy(); 1684 } 1685 else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser())) { 1686 IsSigned = true; 1687 DestTy = SCast->getDestTy(); 1688 } 1689 if (!DestTy) continue; 1690 1691 // If target does not support DestTy natively then do not apply 1692 // this transformation. 1693 if (!TTI.isTypeLegal(DestTy)) continue; 1694 1695 PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0)); 1696 if (!PH) continue; 1697 if (PH->getNumIncomingValues() != 2) continue; 1698 1699 Type *SrcTy = PH->getType(); 1700 int Mantissa = DestTy->getFPMantissaWidth(); 1701 if (Mantissa == -1) continue; 1702 if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa) 1703 continue; 1704 1705 unsigned Entry, Latch; 1706 if (PH->getIncomingBlock(0) == L->getLoopPreheader()) { 1707 Entry = 0; 1708 Latch = 1; 1709 } else { 1710 Entry = 1; 1711 Latch = 0; 1712 } 1713 1714 ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry)); 1715 if (!Init) continue; 1716 Constant *NewInit = ConstantFP::get(DestTy, IsSigned ? 1717 (double)Init->getSExtValue() : 1718 (double)Init->getZExtValue()); 1719 1720 BinaryOperator *Incr = 1721 dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch)); 1722 if (!Incr) continue; 1723 if (Incr->getOpcode() != Instruction::Add 1724 && Incr->getOpcode() != Instruction::Sub) 1725 continue; 1726 1727 /* Initialize new IV, double d = 0.0 in above example. */ 1728 ConstantInt *C = NULL; 1729 if (Incr->getOperand(0) == PH) 1730 C = dyn_cast<ConstantInt>(Incr->getOperand(1)); 1731 else if (Incr->getOperand(1) == PH) 1732 C = dyn_cast<ConstantInt>(Incr->getOperand(0)); 1733 else 1734 continue; 1735 1736 if (!C) continue; 1737 1738 // Ignore negative constants, as the code below doesn't handle them 1739 // correctly. TODO: Remove this restriction. 
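    // (In particular, the new floating-point increment below is built with
    // C->getZExtValue(), so a negative step would be misread as a large
    // positive constant.)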
1740 if (!C->getValue().isStrictlyPositive()) continue; 1741 1742 /* Add new PHINode. */ 1743 PHINode *NewPH = PHINode::Create(DestTy, 2, "IV.S.", PH); 1744 1745 /* create new increment. '++d' in above example. */ 1746 Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue()); 1747 BinaryOperator *NewIncr = 1748 BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ? 1749 Instruction::FAdd : Instruction::FSub, 1750 NewPH, CFP, "IV.S.next.", Incr); 1751 1752 NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry)); 1753 NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch)); 1754 1755 /* Remove cast operation */ 1756 ShadowUse->replaceAllUsesWith(NewPH); 1757 ShadowUse->eraseFromParent(); 1758 Changed = true; 1759 break; 1760 } 1761 } 1762 1763 /// FindIVUserForCond - If Cond has an operand that is an expression of an IV, 1764 /// set the IV user and stride information and return true, otherwise return 1765 /// false. 1766 bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) { 1767 for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) 1768 if (UI->getUser() == Cond) { 1769 // NOTE: we could handle setcc instructions with multiple uses here, but 1770 // InstCombine does it as well for simple uses, it's not clear that it 1771 // occurs enough in real life to handle. 1772 CondUse = UI; 1773 return true; 1774 } 1775 return false; 1776 } 1777 1778 /// OptimizeMax - Rewrite the loop's terminating condition if it uses 1779 /// a max computation. 1780 /// 1781 /// This is a narrow solution to a specific, but acute, problem. For loops 1782 /// like this: 1783 /// 1784 /// i = 0; 1785 /// do { 1786 /// p[i] = 0.0; 1787 /// } while (++i < n); 1788 /// 1789 /// the trip count isn't just 'n', because 'n' might not be positive. And 1790 /// unfortunately this can come up even for loops where the user didn't use 1791 /// a C do-while loop. For example, seemingly well-behaved top-test loops 1792 /// will commonly be lowered like this: 1793 // 1794 /// if (n > 0) { 1795 /// i = 0; 1796 /// do { 1797 /// p[i] = 0.0; 1798 /// } while (++i < n); 1799 /// } 1800 /// 1801 /// and then it's possible for subsequent optimization to obscure the if 1802 /// test in such a way that indvars can't find it. 1803 /// 1804 /// When indvars can't find the if test in loops like this, it creates a 1805 /// max expression, which allows it to give the loop a canonical 1806 /// induction variable: 1807 /// 1808 /// i = 0; 1809 /// max = n < 1 ? 1 : n; 1810 /// do { 1811 /// p[i] = 0.0; 1812 /// } while (++i != max); 1813 /// 1814 /// Canonical induction variables are necessary because the loop passes 1815 /// are designed around them. The most obvious example of this is the 1816 /// LoopInfo analysis, which doesn't remember trip count values. It 1817 /// expects to be able to rediscover the trip count each time it is 1818 /// needed, and it does this using a simple analysis that only succeeds if 1819 /// the loop has a canonical induction variable. 1820 /// 1821 /// However, when it comes time to generate code, the maximum operation 1822 /// can be quite costly, especially if it's inside of an outer loop. 1823 /// 1824 /// This function solves this problem by detecting this type of loop and 1825 /// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting 1826 /// the instructions for the maximum computation. 1827 /// 1828 ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) { 1829 // Check that the loop matches the pattern we're looking for. 
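  // Concretely, the expected shape (from the example above) is roughly:
  //   %max  = select i1 (icmp slt i64 %n, 1), i64 1, i64 %n   ; smax(1, %n)
  //   %cond = icmp ne i64 %i.next, %max
  // where %max is a single-use select whose SCEV equals the trip count.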
1830 if (Cond->getPredicate() != CmpInst::ICMP_EQ && 1831 Cond->getPredicate() != CmpInst::ICMP_NE) 1832 return Cond; 1833 1834 SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1)); 1835 if (!Sel || !Sel->hasOneUse()) return Cond; 1836 1837 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L); 1838 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) 1839 return Cond; 1840 const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1); 1841 1842 // Add one to the backedge-taken count to get the trip count. 1843 const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount); 1844 if (IterationCount != SE.getSCEV(Sel)) return Cond; 1845 1846 // Check for a max calculation that matches the pattern. There's no check 1847 // for ICMP_ULE here because the comparison would be with zero, which 1848 // isn't interesting. 1849 CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; 1850 const SCEVNAryExpr *Max = 0; 1851 if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) { 1852 Pred = ICmpInst::ICMP_SLE; 1853 Max = S; 1854 } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) { 1855 Pred = ICmpInst::ICMP_SLT; 1856 Max = S; 1857 } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) { 1858 Pred = ICmpInst::ICMP_ULT; 1859 Max = U; 1860 } else { 1861 // No match; bail. 1862 return Cond; 1863 } 1864 1865 // To handle a max with more than two operands, this optimization would 1866 // require additional checking and setup. 1867 if (Max->getNumOperands() != 2) 1868 return Cond; 1869 1870 const SCEV *MaxLHS = Max->getOperand(0); 1871 const SCEV *MaxRHS = Max->getOperand(1); 1872 1873 // ScalarEvolution canonicalizes constants to the left. For < and >, look 1874 // for a comparison with 1. For <= and >=, a comparison with zero. 1875 if (!MaxLHS || 1876 (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One))) 1877 return Cond; 1878 1879 // Check the relevant induction variable for conformance to 1880 // the pattern. 1881 const SCEV *IV = SE.getSCEV(Cond->getOperand(0)); 1882 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV); 1883 if (!AR || !AR->isAffine() || 1884 AR->getStart() != One || 1885 AR->getStepRecurrence(SE) != One) 1886 return Cond; 1887 1888 assert(AR->getLoop() == L && 1889 "Loop condition operand is an addrec in a different loop!"); 1890 1891 // Check the right operand of the select, and remember it, as it will 1892 // be used in the new comparison instruction. 1893 Value *NewRHS = 0; 1894 if (ICmpInst::isTrueWhenEqual(Pred)) { 1895 // Look for n+1, and grab n. 1896 if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1))) 1897 if (isa<ConstantInt>(BO->getOperand(1)) && 1898 cast<ConstantInt>(BO->getOperand(1))->isOne() && 1899 SE.getSCEV(BO->getOperand(0)) == MaxRHS) 1900 NewRHS = BO->getOperand(0); 1901 if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2))) 1902 if (isa<ConstantInt>(BO->getOperand(1)) && 1903 cast<ConstantInt>(BO->getOperand(1))->isOne() && 1904 SE.getSCEV(BO->getOperand(0)) == MaxRHS) 1905 NewRHS = BO->getOperand(0); 1906 if (!NewRHS) 1907 return Cond; 1908 } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS) 1909 NewRHS = Sel->getOperand(1); 1910 else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS) 1911 NewRHS = Sel->getOperand(2); 1912 else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS)) 1913 NewRHS = SU->getValue(); 1914 else 1915 // Max doesn't match expected pattern. 1916 return Cond; 1917 1918 // Determine the new comparison opcode. 
It may be signed or unsigned, 1919 // and the original comparison may be either equality or inequality. 1920 if (Cond->getPredicate() == CmpInst::ICMP_EQ) 1921 Pred = CmpInst::getInversePredicate(Pred); 1922 1923 // Ok, everything looks ok to change the condition into an SLT or SGE and 1924 // delete the max calculation. 1925 ICmpInst *NewCond = 1926 new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp"); 1927 1928 // Delete the max calculation instructions. 1929 Cond->replaceAllUsesWith(NewCond); 1930 CondUse->setUser(NewCond); 1931 Instruction *Cmp = cast<Instruction>(Sel->getOperand(0)); 1932 Cond->eraseFromParent(); 1933 Sel->eraseFromParent(); 1934 if (Cmp->use_empty()) 1935 Cmp->eraseFromParent(); 1936 return NewCond; 1937 } 1938 1939 /// OptimizeLoopTermCond - Change loop terminating condition to use the 1940 /// postinc iv when possible. 1941 void 1942 LSRInstance::OptimizeLoopTermCond() { 1943 SmallPtrSet<Instruction *, 4> PostIncs; 1944 1945 BasicBlock *LatchBlock = L->getLoopLatch(); 1946 SmallVector<BasicBlock*, 8> ExitingBlocks; 1947 L->getExitingBlocks(ExitingBlocks); 1948 1949 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 1950 BasicBlock *ExitingBlock = ExitingBlocks[i]; 1951 1952 // Get the terminating condition for the loop if possible. If we 1953 // can, we want to change it to use a post-incremented version of its 1954 // induction variable, to allow coalescing the live ranges for the IV into 1955 // one register value. 1956 1957 BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator()); 1958 if (!TermBr) 1959 continue; 1960 // FIXME: Overly conservative, termination condition could be an 'or' etc.. 1961 if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition())) 1962 continue; 1963 1964 // Search IVUsesByStride to find Cond's IVUse if there is one. 1965 IVStrideUse *CondUse = 0; 1966 ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition()); 1967 if (!FindIVUserForCond(Cond, CondUse)) 1968 continue; 1969 1970 // If the trip count is computed in terms of a max (due to ScalarEvolution 1971 // being unable to find a sufficient guard, for example), change the loop 1972 // comparison to use SLT or ULT instead of NE. 1973 // One consequence of doing this now is that it disrupts the count-down 1974 // optimization. That's not always a bad thing though, because in such 1975 // cases it may still be worthwhile to avoid a max. 1976 Cond = OptimizeMax(Cond, CondUse); 1977 1978 // If this exiting block dominates the latch block, it may also use 1979 // the post-inc value if it won't be shared with other uses. 1980 // Check for dominance. 1981 if (!DT.dominates(ExitingBlock, LatchBlock)) 1982 continue; 1983 1984 // Conservatively avoid trying to use the post-inc value in non-latch 1985 // exits if there may be pre-inc users in intervening blocks. 1986 if (LatchBlock != ExitingBlock) 1987 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) 1988 // Test if the use is reachable from the exiting block. This dominator 1989 // query is a conservative approximation of reachability. 1990 if (&*UI != CondUse && 1991 !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) { 1992 // Conservatively assume there may be reuse if the quotient of their 1993 // strides could be a legal scale. 
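          // For example, if the condition's IV strides by 4 and this use
          // strides by 16, the quotient 4 might be usable as the Scale in a
          // base + 4*reg addressing mode, so the pre-inc value may still be
          // worth sharing.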
1994 const SCEV *A = IU.getStride(*CondUse, L); 1995 const SCEV *B = IU.getStride(*UI, L); 1996 if (!A || !B) continue; 1997 if (SE.getTypeSizeInBits(A->getType()) != 1998 SE.getTypeSizeInBits(B->getType())) { 1999 if (SE.getTypeSizeInBits(A->getType()) > 2000 SE.getTypeSizeInBits(B->getType())) 2001 B = SE.getSignExtendExpr(B, A->getType()); 2002 else 2003 A = SE.getSignExtendExpr(A, B->getType()); 2004 } 2005 if (const SCEVConstant *D = 2006 dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) { 2007 const ConstantInt *C = D->getValue(); 2008 // Stride of one or negative one can have reuse with non-addresses. 2009 if (C->isOne() || C->isAllOnesValue()) 2010 goto decline_post_inc; 2011 // Avoid weird situations. 2012 if (C->getValue().getMinSignedBits() >= 64 || 2013 C->getValue().isMinSignedValue()) 2014 goto decline_post_inc; 2015 // Check for possible scaled-address reuse. 2016 Type *AccessTy = getAccessType(UI->getUser()); 2017 int64_t Scale = C->getSExtValue(); 2018 if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ 0, 2019 /*BaseOffset=*/ 0, 2020 /*HasBaseReg=*/ false, Scale)) 2021 goto decline_post_inc; 2022 Scale = -Scale; 2023 if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ 0, 2024 /*BaseOffset=*/ 0, 2025 /*HasBaseReg=*/ false, Scale)) 2026 goto decline_post_inc; 2027 } 2028 } 2029 2030 DEBUG(dbgs() << " Change loop exiting icmp to use postinc iv: " 2031 << *Cond << '\n'); 2032 2033 // It's possible for the setcc instruction to be anywhere in the loop, and 2034 // possible for it to have multiple users. If it is not immediately before 2035 // the exiting block branch, move it. 2036 if (&*++BasicBlock::iterator(Cond) != TermBr) { 2037 if (Cond->hasOneUse()) { 2038 Cond->moveBefore(TermBr); 2039 } else { 2040 // Clone the terminating condition and insert into the loopend. 2041 ICmpInst *OldCond = Cond; 2042 Cond = cast<ICmpInst>(Cond->clone()); 2043 Cond->setName(L->getHeader()->getName() + ".termcond"); 2044 ExitingBlock->getInstList().insert(TermBr, Cond); 2045 2046 // Clone the IVUse, as the old use still exists! 2047 CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace()); 2048 TermBr->replaceUsesOfWith(OldCond, Cond); 2049 } 2050 } 2051 2052 // If we get to here, we know that we can transform the setcc instruction to 2053 // use the post-incremented version of the IV, allowing us to coalesce the 2054 // live ranges for the IV correctly. 2055 CondUse->transformToPostInc(L); 2056 Changed = true; 2057 2058 PostIncs.insert(Cond); 2059 decline_post_inc:; 2060 } 2061 2062 // Determine an insertion point for the loop induction variable increment. It 2063 // must dominate all the post-inc comparisons we just set up, and it must 2064 // dominate the loop latch edge. 2065 IVIncInsertPos = L->getLoopLatch()->getTerminator(); 2066 for (SmallPtrSet<Instruction *, 4>::const_iterator I = PostIncs.begin(), 2067 E = PostIncs.end(); I != E; ++I) { 2068 BasicBlock *BB = 2069 DT.findNearestCommonDominator(IVIncInsertPos->getParent(), 2070 (*I)->getParent()); 2071 if (BB == (*I)->getParent()) 2072 IVIncInsertPos = *I; 2073 else if (BB != IVIncInsertPos->getParent()) 2074 IVIncInsertPos = BB->getTerminator(); 2075 } 2076 } 2077 2078 /// reconcileNewOffset - Determine if the given use can accommodate a fixup 2079 /// at the given offset and other details. If so, update the use and 2080 /// return true. 
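/// For example, if an existing Address use already spans offsets [0, 8] and a
/// new fixup arrives at offset 24, the use is only widened if an immediate of
/// 24 - 0 remains always-foldable for this kind of use and access type.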
bool
LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
                                LSRUse::KindType Kind, Type *AccessTy) {
  int64_t NewMinOffset = LU.MinOffset;
  int64_t NewMaxOffset = LU.MaxOffset;
  Type *NewAccessTy = AccessTy;

  // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
  // something conservative; however, this can pessimize in the case that one of
  // the uses will have all its uses outside the loop, for example.
  if (LU.Kind != Kind)
    return false;
  // Conservatively assume HasBaseReg is true for now.
  if (NewOffset < LU.MinOffset) {
    if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ 0,
                          LU.MaxOffset - NewOffset, HasBaseReg))
      return false;
    NewMinOffset = NewOffset;
  } else if (NewOffset > LU.MaxOffset) {
    if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ 0,
                          NewOffset - LU.MinOffset, HasBaseReg))
      return false;
    NewMaxOffset = NewOffset;
  }
  // Check for a mismatched access type, and fall back conservatively as needed.
  // TODO: Be less conservative when the type is similar and can use the same
  // addressing modes.
  if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
    NewAccessTy = Type::getVoidTy(AccessTy->getContext());

  // Update the use.
  LU.MinOffset = NewMinOffset;
  LU.MaxOffset = NewMaxOffset;
  LU.AccessTy = NewAccessTy;
  if (NewOffset != LU.Offsets.back())
    LU.Offsets.push_back(NewOffset);
  return true;
}

/// getUse - Return an LSRUse index and an offset value for a fixup which
/// needs the given expression, with the given kind and optional access type.
/// Either reuse an existing use or create a new one, as needed.
std::pair<size_t, int64_t>
LSRInstance::getUse(const SCEV *&Expr,
                    LSRUse::KindType Kind, Type *AccessTy) {
  const SCEV *Copy = Expr;
  int64_t Offset = ExtractImmediate(Expr, SE);

  // Basic uses can't accept any offset, for example.
  if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ 0,
                        Offset, /*HasBaseReg=*/ true)) {
    Expr = Copy;
    Offset = 0;
  }

  std::pair<UseMapTy::iterator, bool> P =
    UseMap.insert(std::make_pair(std::make_pair(Expr, Kind), 0));
  if (!P.second) {
    // A use already existed with this base.
    size_t LUIdx = P.first->second;
    LSRUse &LU = Uses[LUIdx];
    if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy))
      // Reuse this use.
      return std::make_pair(LUIdx, Offset);
  }

  // Create a new use.
  size_t LUIdx = Uses.size();
  P.first->second = LUIdx;
  Uses.push_back(LSRUse(Kind, AccessTy));
  LSRUse &LU = Uses[LUIdx];

  // We don't need to track redundant offsets, but we don't need to go out
  // of our way here to avoid them.
  if (LU.Offsets.empty() || Offset != LU.Offsets.back())
    LU.Offsets.push_back(Offset);

  LU.MinOffset = Offset;
  LU.MaxOffset = Offset;
  return std::make_pair(LUIdx, Offset);
}

/// DeleteUse - Delete the given use from the Uses list.
void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) {
  if (&LU != &Uses.back())
    std::swap(LU, Uses.back());
  Uses.pop_back();

  // Update RegUses.
  RegUses.SwapAndDropUse(LUIdx, Uses.size());
}

/// FindUseWithSimilarFormula - Look for a use distinct from OrigLU which has
/// a formula that has the same registers as the given formula.
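/// "Similar" here means the use has the same kind (excluding ICmpZero),
/// access type, and widest fixup type, and owns a formula with identical
/// registers, symbol, scale, and unfolded offset whose base offset is zero.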
2175 LSRUse * 2176 LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF, 2177 const LSRUse &OrigLU) { 2178 // Search all uses for the formula. This could be more clever. 2179 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 2180 LSRUse &LU = Uses[LUIdx]; 2181 // Check whether this use is close enough to OrigLU, to see whether it's 2182 // worthwhile looking through its formulae. 2183 // Ignore ICmpZero uses because they may contain formulae generated by 2184 // GenerateICmpZeroScales, in which case adding fixup offsets may 2185 // be invalid. 2186 if (&LU != &OrigLU && 2187 LU.Kind != LSRUse::ICmpZero && 2188 LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy && 2189 LU.WidestFixupType == OrigLU.WidestFixupType && 2190 LU.HasFormulaWithSameRegs(OrigF)) { 2191 // Scan through this use's formulae. 2192 for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(), 2193 E = LU.Formulae.end(); I != E; ++I) { 2194 const Formula &F = *I; 2195 // Check to see if this formula has the same registers and symbols 2196 // as OrigF. 2197 if (F.BaseRegs == OrigF.BaseRegs && 2198 F.ScaledReg == OrigF.ScaledReg && 2199 F.BaseGV == OrigF.BaseGV && 2200 F.Scale == OrigF.Scale && 2201 F.UnfoldedOffset == OrigF.UnfoldedOffset) { 2202 if (F.BaseOffset == 0) 2203 return &LU; 2204 // This is the formula where all the registers and symbols matched; 2205 // there aren't going to be any others. Since we declined it, we 2206 // can skip the rest of the formulae and proceed to the next LSRUse. 2207 break; 2208 } 2209 } 2210 } 2211 } 2212 2213 // Nothing looked good. 2214 return 0; 2215 } 2216 2217 void LSRInstance::CollectInterestingTypesAndFactors() { 2218 SmallSetVector<const SCEV *, 4> Strides; 2219 2220 // Collect interesting types and strides. 2221 SmallVector<const SCEV *, 4> Worklist; 2222 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) { 2223 const SCEV *Expr = IU.getExpr(*UI); 2224 2225 // Collect interesting types. 2226 Types.insert(SE.getEffectiveSCEVType(Expr->getType())); 2227 2228 // Add strides for mentioned loops. 2229 Worklist.push_back(Expr); 2230 do { 2231 const SCEV *S = Worklist.pop_back_val(); 2232 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { 2233 if (AR->getLoop() == L) 2234 Strides.insert(AR->getStepRecurrence(SE)); 2235 Worklist.push_back(AR->getStart()); 2236 } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 2237 Worklist.append(Add->op_begin(), Add->op_end()); 2238 } 2239 } while (!Worklist.empty()); 2240 } 2241 2242 // Compute interesting factors from the set of interesting strides. 
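  // For example, strides {1, 2, 8} yield the factors 2, 8, and 4: each pair of
  // strides is divided in both directions and only exact quotients that fit in
  // 64 bits are recorded.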
  for (SmallSetVector<const SCEV *, 4>::const_iterator
       I = Strides.begin(), E = Strides.end(); I != E; ++I)
    for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
         llvm::next(I); NewStrideIter != E; ++NewStrideIter) {
      const SCEV *OldStride = *I;
      const SCEV *NewStride = *NewStrideIter;

      if (SE.getTypeSizeInBits(OldStride->getType()) !=
          SE.getTypeSizeInBits(NewStride->getType())) {
        if (SE.getTypeSizeInBits(OldStride->getType()) >
            SE.getTypeSizeInBits(NewStride->getType()))
          NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType());
        else
          OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
      }
      if (const SCEVConstant *Factor =
            dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride,
                                                        SE, true))) {
        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
          Factors.insert(Factor->getValue()->getValue().getSExtValue());
      } else if (const SCEVConstant *Factor =
                   dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride,
                                                               NewStride,
                                                               SE, true))) {
        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
          Factors.insert(Factor->getValue()->getValue().getSExtValue());
      }
    }

  // If all uses use the same type, don't bother looking for truncation-based
  // reuse.
  if (Types.size() == 1)
    Types.clear();

  DEBUG(print_factors_and_types(dbgs()));
}

/// findIVOperand - Helper for CollectChains that finds an IV operand (computed
/// by an AddRec in this loop) within [OI,OE) or returns OE. If IVUsers mapped
/// Instructions to IVStrideUses, we could partially skip this.
static User::op_iterator
findIVOperand(User::op_iterator OI, User::op_iterator OE,
              Loop *L, ScalarEvolution &SE) {
  for(; OI != OE; ++OI) {
    if (Instruction *Oper = dyn_cast<Instruction>(*OI)) {
      if (!SE.isSCEVable(Oper->getType()))
        continue;

      if (const SCEVAddRecExpr *AR =
          dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Oper))) {
        if (AR->getLoop() == L)
          break;
      }
    }
  }
  return OI;
}

/// getWideOperand - IVChain logic must consistently peek base TruncInst
/// operands, so wrap it in a convenient helper.
static Value *getWideOperand(Value *Oper) {
  if (TruncInst *Trunc = dyn_cast<TruncInst>(Oper))
    return Trunc->getOperand(0);
  return Oper;
}

/// isCompatibleIVType - Return true if we allow an IV chain to include both
/// types.
static bool isCompatibleIVType(Value *LVal, Value *RVal) {
  Type *LType = LVal->getType();
  Type *RType = RVal->getType();
  return (LType == RType) || (LType->isPointerTy() && RType->isPointerTy());
}

/// getExprBase - Return an approximation of this SCEV expression's "base", or
/// NULL for any constant. Returning the expression itself is
/// conservative. Returning a deeper subexpression is more precise and valid as
/// long as it isn't less complex than another subexpression. For expressions
/// involving multiple unscaled values, we need to return the pointer-type
/// SCEVUnknown. This avoids forming chains across objects, such as:
/// PrevOper==a[i], IVOper==b[i], IVInc==b-a.
///
/// Since SCEVUnknown is the rightmost type, and pointers are the rightmost
/// SCEVUnknown, we simply return the rightmost SCEV operand.
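/// For example, the base of {(8 + %p),+,4} is %p, while a fully scaled
/// expression such as (4 * %x) is conservatively returned unchanged.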
static const SCEV *getExprBase(const SCEV *S) {
  switch (S->getSCEVType()) {
  default: // including scUnknown.
    return S;
  case scConstant:
    return 0;
  case scTruncate:
    return getExprBase(cast<SCEVTruncateExpr>(S)->getOperand());
  case scZeroExtend:
    return getExprBase(cast<SCEVZeroExtendExpr>(S)->getOperand());
  case scSignExtend:
    return getExprBase(cast<SCEVSignExtendExpr>(S)->getOperand());
  case scAddExpr: {
    // Skip over scaled operands (scMulExpr) to follow add operands as long as
    // there's nothing more complex.
    // FIXME: not sure if we want to recognize negation.
    const SCEVAddExpr *Add = cast<SCEVAddExpr>(S);
    for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(Add->op_end()),
           E(Add->op_begin()); I != E; ++I) {
      const SCEV *SubExpr = *I;
      if (SubExpr->getSCEVType() == scAddExpr)
        return getExprBase(SubExpr);

      if (SubExpr->getSCEVType() != scMulExpr)
        return SubExpr;
    }
    return S; // all operands are scaled, be conservative.
  }
  case scAddRecExpr:
    return getExprBase(cast<SCEVAddRecExpr>(S)->getStart());
  }
}

/// Return true if the chain increment is profitable to expand into a loop
/// invariant value, which may require its own register. A profitable chain
/// increment will be an offset relative to the same base. We allow such offsets
/// to potentially be used as chain increments as long as they are not obviously
/// expensive to expand using real instructions.
bool IVChain::isProfitableIncrement(const SCEV *OperExpr,
                                    const SCEV *IncExpr,
                                    ScalarEvolution &SE) {
  // Aggressively form chains when -stress-ivchain.
  if (StressIVChain)
    return true;

  // Do not replace a constant offset from IV head with a nonconstant IV
  // increment.
  if (!isa<SCEVConstant>(IncExpr)) {
    const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Incs[0].IVOperand));
    if (isa<SCEVConstant>(SE.getMinusSCEV(OperExpr, HeadExpr)))
      return 0;
  }

  SmallPtrSet<const SCEV*, 8> Processed;
  return !isHighCostExpansion(IncExpr, Processed, SE);
}

/// Return true if the number of registers needed for the chain is estimated to
/// be less than the number required for the individual IV users. First prohibit
/// any IV users that keep the IV live across increments (the Users set should
/// be empty). Next count the number and type of increments in the chain.
///
/// Chaining IVs can lead to considerable code bloat if ISEL doesn't
/// effectively use postinc addressing modes. Only consider it profitable if the
/// increments can be computed in fewer registers when chained.
///
/// TODO: Consider IVInc free if it's already used in other chains.
static bool
isProfitableChain(IVChain &Chain, SmallPtrSet<Instruction*, 4> &Users,
                  ScalarEvolution &SE, const TargetTransformInfo &TTI) {
  if (StressIVChain)
    return true;

  if (!Chain.hasIncs())
    return false;

  if (!Users.empty()) {
    DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n";
          for (SmallPtrSet<Instruction*, 4>::const_iterator I = Users.begin(),
                 E = Users.end(); I != E; ++I) {
            dbgs() << " " << **I << "\n";
          });
    return false;
  }
  assert(!Chain.Incs.empty() && "empty IV chains are not allowed");

  // The chain itself may require a register, so initialize cost to 1.
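  // The accounting below then credits chains that close back on the header phi
  // or contain more than one constant increment, charges a register for each
  // loop-variant increment, and credits an increment that repeats the previous
  // one; the chain is kept only if the resulting cost is negative.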
2414 int cost = 1; 2415 2416 // A complete chain likely eliminates the need for keeping the original IV in 2417 // a register. LSR does not currently know how to form a complete chain unless 2418 // the header phi already exists. 2419 if (isa<PHINode>(Chain.tailUserInst()) 2420 && SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) { 2421 --cost; 2422 } 2423 const SCEV *LastIncExpr = 0; 2424 unsigned NumConstIncrements = 0; 2425 unsigned NumVarIncrements = 0; 2426 unsigned NumReusedIncrements = 0; 2427 for (IVChain::const_iterator I = Chain.begin(), E = Chain.end(); 2428 I != E; ++I) { 2429 2430 if (I->IncExpr->isZero()) 2431 continue; 2432 2433 // Incrementing by zero or some constant is neutral. We assume constants can 2434 // be folded into an addressing mode or an add's immediate operand. 2435 if (isa<SCEVConstant>(I->IncExpr)) { 2436 ++NumConstIncrements; 2437 continue; 2438 } 2439 2440 if (I->IncExpr == LastIncExpr) 2441 ++NumReusedIncrements; 2442 else 2443 ++NumVarIncrements; 2444 2445 LastIncExpr = I->IncExpr; 2446 } 2447 // An IV chain with a single increment is handled by LSR's postinc 2448 // uses. However, a chain with multiple increments requires keeping the IV's 2449 // value live longer than it needs to be if chained. 2450 if (NumConstIncrements > 1) 2451 --cost; 2452 2453 // Materializing increment expressions in the preheader that didn't exist in 2454 // the original code may cost a register. For example, sign-extended array 2455 // indices can produce ridiculous increments like this: 2456 // IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64))) 2457 cost += NumVarIncrements; 2458 2459 // Reusing variable increments likely saves a register to hold the multiple of 2460 // the stride. 2461 cost -= NumReusedIncrements; 2462 2463 DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost 2464 << "\n"); 2465 2466 return cost < 0; 2467 } 2468 2469 /// ChainInstruction - Add this IV user to an existing chain or make it the head 2470 /// of a new chain. 2471 void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper, 2472 SmallVectorImpl<ChainUsers> &ChainUsersVec) { 2473 // When IVs are used as types of varying widths, they are generally converted 2474 // to a wider type with some uses remaining narrow under a (free) trunc. 2475 Value *const NextIV = getWideOperand(IVOper); 2476 const SCEV *const OperExpr = SE.getSCEV(NextIV); 2477 const SCEV *const OperExprBase = getExprBase(OperExpr); 2478 2479 // Visit all existing chains. Check if its IVOper can be computed as a 2480 // profitable loop invariant increment from the last link in the Chain. 2481 unsigned ChainIdx = 0, NChains = IVChainVec.size(); 2482 const SCEV *LastIncExpr = 0; 2483 for (; ChainIdx < NChains; ++ChainIdx) { 2484 IVChain &Chain = IVChainVec[ChainIdx]; 2485 2486 // Prune the solution space aggressively by checking that both IV operands 2487 // are expressions that operate on the same unscaled SCEVUnknown. This 2488 // "base" will be canceled by the subsequent getMinusSCEV call. Checking 2489 // first avoids creating extra SCEV expressions. 2490 if (!StressIVChain && Chain.ExprBase != OperExprBase) 2491 continue; 2492 2493 Value *PrevIV = getWideOperand(Chain.Incs.back().IVOperand); 2494 if (!isCompatibleIVType(PrevIV, NextIV)) 2495 continue; 2496 2497 // A phi node terminates a chain. 2498 if (isa<PHINode>(UserInst) && isa<PHINode>(Chain.tailUserInst())) 2499 continue; 2500 2501 // The increment must be loop-invariant so it can be kept in a register. 
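    // For example, chaining p[i+2] onto p[i] gives an increment of
    // 2 * sizeof(*p), a loop-invariant constant that can sit in a register or
    // fold into an addressing mode for the whole loop.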
2502 const SCEV *PrevExpr = SE.getSCEV(PrevIV); 2503 const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr); 2504 if (!SE.isLoopInvariant(IncExpr, L)) 2505 continue; 2506 2507 if (Chain.isProfitableIncrement(OperExpr, IncExpr, SE)) { 2508 LastIncExpr = IncExpr; 2509 break; 2510 } 2511 } 2512 // If we haven't found a chain, create a new one, unless we hit the max. Don't 2513 // bother for phi nodes, because they must be last in the chain. 2514 if (ChainIdx == NChains) { 2515 if (isa<PHINode>(UserInst)) 2516 return; 2517 if (NChains >= MaxChains && !StressIVChain) { 2518 DEBUG(dbgs() << "IV Chain Limit\n"); 2519 return; 2520 } 2521 LastIncExpr = OperExpr; 2522 // IVUsers may have skipped over sign/zero extensions. We don't currently 2523 // attempt to form chains involving extensions unless they can be hoisted 2524 // into this loop's AddRec. 2525 if (!isa<SCEVAddRecExpr>(LastIncExpr)) 2526 return; 2527 ++NChains; 2528 IVChainVec.push_back(IVChain(IVInc(UserInst, IVOper, LastIncExpr), 2529 OperExprBase)); 2530 ChainUsersVec.resize(NChains); 2531 DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst 2532 << ") IV=" << *LastIncExpr << "\n"); 2533 } else { 2534 DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Inc: (" << *UserInst 2535 << ") IV+" << *LastIncExpr << "\n"); 2536 // Add this IV user to the end of the chain. 2537 IVChainVec[ChainIdx].add(IVInc(UserInst, IVOper, LastIncExpr)); 2538 } 2539 2540 SmallPtrSet<Instruction*,4> &NearUsers = ChainUsersVec[ChainIdx].NearUsers; 2541 // This chain's NearUsers become FarUsers. 2542 if (!LastIncExpr->isZero()) { 2543 ChainUsersVec[ChainIdx].FarUsers.insert(NearUsers.begin(), 2544 NearUsers.end()); 2545 NearUsers.clear(); 2546 } 2547 2548 // All other uses of IVOperand become near uses of the chain. 2549 // We currently ignore intermediate values within SCEV expressions, assuming 2550 // they will eventually be used be the current chain, or can be computed 2551 // from one of the chain increments. To be more precise we could 2552 // transitively follow its user and only add leaf IV users to the set. 2553 for (Value::use_iterator UseIter = IVOper->use_begin(), 2554 UseEnd = IVOper->use_end(); UseIter != UseEnd; ++UseIter) { 2555 Instruction *OtherUse = dyn_cast<Instruction>(*UseIter); 2556 if (!OtherUse || OtherUse == UserInst) 2557 continue; 2558 if (SE.isSCEVable(OtherUse->getType()) 2559 && !isa<SCEVUnknown>(SE.getSCEV(OtherUse)) 2560 && IU.isIVUserOrOperand(OtherUse)) { 2561 continue; 2562 } 2563 NearUsers.insert(OtherUse); 2564 } 2565 2566 // Since this user is part of the chain, it's no longer considered a use 2567 // of the chain. 2568 ChainUsersVec[ChainIdx].FarUsers.erase(UserInst); 2569 } 2570 2571 /// CollectChains - Populate the vector of Chains. 2572 /// 2573 /// This decreases ILP at the architecture level. Targets with ample registers, 2574 /// multiple memory ports, and no register renaming probably don't want 2575 /// this. However, such targets should probably disable LSR altogether. 2576 /// 2577 /// The job of LSR is to make a reasonable choice of induction variables across 2578 /// the loop. Subsequent passes can easily "unchain" computation exposing more 2579 /// ILP *within the loop* if the target wants it. 2580 /// 2581 /// Finding the best IV chain is potentially a scheduling problem. Since LSR 2582 /// will not reorder memory operations, it will recognize this as a chain, but 2583 /// will generate redundant IV increments. 
Ideally this would be corrected later 2584 /// by a smart scheduler: 2585 /// = A[i] 2586 /// = A[i+x] 2587 /// A[i] = 2588 /// A[i+x] = 2589 /// 2590 /// TODO: Walk the entire domtree within this loop, not just the path to the 2591 /// loop latch. This will discover chains on side paths, but requires 2592 /// maintaining multiple copies of the Chains state. 2593 void LSRInstance::CollectChains() { 2594 DEBUG(dbgs() << "Collecting IV Chains.\n"); 2595 SmallVector<ChainUsers, 8> ChainUsersVec; 2596 2597 SmallVector<BasicBlock *,8> LatchPath; 2598 BasicBlock *LoopHeader = L->getHeader(); 2599 for (DomTreeNode *Rung = DT.getNode(L->getLoopLatch()); 2600 Rung->getBlock() != LoopHeader; Rung = Rung->getIDom()) { 2601 LatchPath.push_back(Rung->getBlock()); 2602 } 2603 LatchPath.push_back(LoopHeader); 2604 2605 // Walk the instruction stream from the loop header to the loop latch. 2606 for (SmallVectorImpl<BasicBlock *>::reverse_iterator 2607 BBIter = LatchPath.rbegin(), BBEnd = LatchPath.rend(); 2608 BBIter != BBEnd; ++BBIter) { 2609 for (BasicBlock::iterator I = (*BBIter)->begin(), E = (*BBIter)->end(); 2610 I != E; ++I) { 2611 // Skip instructions that weren't seen by IVUsers analysis. 2612 if (isa<PHINode>(I) || !IU.isIVUserOrOperand(I)) 2613 continue; 2614 2615 // Ignore users that are part of a SCEV expression. This way we only 2616 // consider leaf IV Users. This effectively rediscovers a portion of 2617 // IVUsers analysis but in program order this time. 2618 if (SE.isSCEVable(I->getType()) && !isa<SCEVUnknown>(SE.getSCEV(I))) 2619 continue; 2620 2621 // Remove this instruction from any NearUsers set it may be in. 2622 for (unsigned ChainIdx = 0, NChains = IVChainVec.size(); 2623 ChainIdx < NChains; ++ChainIdx) { 2624 ChainUsersVec[ChainIdx].NearUsers.erase(I); 2625 } 2626 // Search for operands that can be chained. 2627 SmallPtrSet<Instruction*, 4> UniqueOperands; 2628 User::op_iterator IVOpEnd = I->op_end(); 2629 User::op_iterator IVOpIter = findIVOperand(I->op_begin(), IVOpEnd, L, SE); 2630 while (IVOpIter != IVOpEnd) { 2631 Instruction *IVOpInst = cast<Instruction>(*IVOpIter); 2632 if (UniqueOperands.insert(IVOpInst)) 2633 ChainInstruction(I, IVOpInst, ChainUsersVec); 2634 IVOpIter = findIVOperand(llvm::next(IVOpIter), IVOpEnd, L, SE); 2635 } 2636 } // Continue walking down the instructions. 2637 } // Continue walking down the domtree. 2638 // Visit phi backedges to determine if the chain can generate the IV postinc. 2639 for (BasicBlock::iterator I = L->getHeader()->begin(); 2640 PHINode *PN = dyn_cast<PHINode>(I); ++I) { 2641 if (!SE.isSCEVable(PN->getType())) 2642 continue; 2643 2644 Instruction *IncV = 2645 dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch())); 2646 if (IncV) 2647 ChainInstruction(PN, IncV, ChainUsersVec); 2648 } 2649 // Remove any unprofitable chains. 2650 unsigned ChainIdx = 0; 2651 for (unsigned UsersIdx = 0, NChains = IVChainVec.size(); 2652 UsersIdx < NChains; ++UsersIdx) { 2653 if (!isProfitableChain(IVChainVec[UsersIdx], 2654 ChainUsersVec[UsersIdx].FarUsers, SE, TTI)) 2655 continue; 2656 // Preserve the chain at UsesIdx. 
    if (ChainIdx != UsersIdx)
      IVChainVec[ChainIdx] = IVChainVec[UsersIdx];
    FinalizeChain(IVChainVec[ChainIdx]);
    ++ChainIdx;
  }
  IVChainVec.resize(ChainIdx);
}

void LSRInstance::FinalizeChain(IVChain &Chain) {
  assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
  DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n");

  for (IVChain::const_iterator I = Chain.begin(), E = Chain.end();
       I != E; ++I) {
    DEBUG(dbgs() << " Inc: " << *I->UserInst << "\n");
    User::op_iterator UseI =
      std::find(I->UserInst->op_begin(), I->UserInst->op_end(), I->IVOperand);
    assert(UseI != I->UserInst->op_end() && "cannot find IV operand");
    IVIncSet.insert(UseI);
  }
}

/// Return true if the IVInc can be folded into an addressing mode.
static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
                             Value *Operand, const TargetTransformInfo &TTI) {
  const SCEVConstant *IncConst = dyn_cast<SCEVConstant>(IncExpr);
  if (!IncConst || !isAddressUse(UserInst, Operand))
    return false;

  if (IncConst->getValue()->getValue().getMinSignedBits() > 64)
    return false;

  int64_t IncOffset = IncConst->getValue()->getSExtValue();
  if (!isAlwaysFoldable(TTI, LSRUse::Address,
                        getAccessType(UserInst), /*BaseGV=*/ 0,
                        IncOffset, /*HasBaseReg=*/ false))
    return false;

  return true;
}

/// GenerateIVChain - Generate an add or subtract for each IVInc in a chain to
/// materialize the IV user's operand from the previous IV user's operand.
void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
                                  SmallVectorImpl<WeakVH> &DeadInsts) {
  // Find the new IVOperand for the head of the chain. It may have been replaced
  // by LSR.
  const IVInc &Head = Chain.Incs[0];
  User::op_iterator IVOpEnd = Head.UserInst->op_end();
  User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(),
                                             IVOpEnd, L, SE);
  Value *IVSrc = 0;
  while (IVOpIter != IVOpEnd) {
    IVSrc = getWideOperand(*IVOpIter);

    // If this operand computes the expression that the chain needs, we may use
    // it. (Check this after setting IVSrc which is used below.)
    //
    // Note that if Head.IncExpr is wider than IVSrc, then this phi is too
    // narrow for the chain, so we can no longer use it. We do allow using a
    // wider phi, assuming LSR checked for free truncation. In that case we
    // should already have a truncate on this operand such that
    // getSCEV(IVSrc) == IncExpr.
    if (SE.getSCEV(*IVOpIter) == Head.IncExpr
        || SE.getSCEV(IVSrc) == Head.IncExpr) {
      break;
    }
    IVOpIter = findIVOperand(llvm::next(IVOpIter), IVOpEnd, L, SE);
  }
  if (IVOpIter == IVOpEnd) {
    // Gracefully give up on this chain.
    DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n");
    return;
  }

  DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n");
  Type *IVTy = IVSrc->getType();
  Type *IntTy = SE.getEffectiveSCEVType(IVTy);
  const SCEV *LeftOverExpr = 0;
  for (IVChain::const_iterator IncI = Chain.begin(),
         IncE = Chain.end(); IncI != IncE; ++IncI) {

    Instruction *InsertPt = IncI->UserInst;
    if (isa<PHINode>(InsertPt))
      InsertPt = L->getLoopLatch()->getTerminator();

    // IVOper will replace the current IV User's operand.
IVSrc is the IV 2744 // value currently held in a register. 2745 Value *IVOper = IVSrc; 2746 if (!IncI->IncExpr->isZero()) { 2747 // IncExpr was the result of subtraction of two narrow values, so must 2748 // be signed. 2749 const SCEV *IncExpr = SE.getNoopOrSignExtend(IncI->IncExpr, IntTy); 2750 LeftOverExpr = LeftOverExpr ? 2751 SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr; 2752 } 2753 if (LeftOverExpr && !LeftOverExpr->isZero()) { 2754 // Expand the IV increment. 2755 Rewriter.clearPostInc(); 2756 Value *IncV = Rewriter.expandCodeFor(LeftOverExpr, IntTy, InsertPt); 2757 const SCEV *IVOperExpr = SE.getAddExpr(SE.getUnknown(IVSrc), 2758 SE.getUnknown(IncV)); 2759 IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt); 2760 2761 // If an IV increment can't be folded, use it as the next IV value. 2762 if (!canFoldIVIncExpr(LeftOverExpr, IncI->UserInst, IncI->IVOperand, 2763 TTI)) { 2764 assert(IVTy == IVOper->getType() && "inconsistent IV increment type"); 2765 IVSrc = IVOper; 2766 LeftOverExpr = 0; 2767 } 2768 } 2769 Type *OperTy = IncI->IVOperand->getType(); 2770 if (IVTy != OperTy) { 2771 assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) && 2772 "cannot extend a chained IV"); 2773 IRBuilder<> Builder(InsertPt); 2774 IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain"); 2775 } 2776 IncI->UserInst->replaceUsesOfWith(IncI->IVOperand, IVOper); 2777 DeadInsts.push_back(IncI->IVOperand); 2778 } 2779 // If LSR created a new, wider phi, we may also replace its postinc. We only 2780 // do this if we also found a wide value for the head of the chain. 2781 if (isa<PHINode>(Chain.tailUserInst())) { 2782 for (BasicBlock::iterator I = L->getHeader()->begin(); 2783 PHINode *Phi = dyn_cast<PHINode>(I); ++I) { 2784 if (!isCompatibleIVType(Phi, IVSrc)) 2785 continue; 2786 Instruction *PostIncV = dyn_cast<Instruction>( 2787 Phi->getIncomingValueForBlock(L->getLoopLatch())); 2788 if (!PostIncV || (SE.getSCEV(PostIncV) != SE.getSCEV(IVSrc))) 2789 continue; 2790 Value *IVOper = IVSrc; 2791 Type *PostIncTy = PostIncV->getType(); 2792 if (IVTy != PostIncTy) { 2793 assert(PostIncTy->isPointerTy() && "mixing int/ptr IV types"); 2794 IRBuilder<> Builder(L->getLoopLatch()->getTerminator()); 2795 Builder.SetCurrentDebugLocation(PostIncV->getDebugLoc()); 2796 IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain"); 2797 } 2798 Phi->replaceUsesOfWith(PostIncV, IVOper); 2799 DeadInsts.push_back(PostIncV); 2800 } 2801 } 2802 } 2803 2804 void LSRInstance::CollectFixupsAndInitialFormulae() { 2805 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) { 2806 Instruction *UserInst = UI->getUser(); 2807 // Skip IV users that are part of profitable IV Chains. 2808 User::op_iterator UseI = std::find(UserInst->op_begin(), UserInst->op_end(), 2809 UI->getOperandValToReplace()); 2810 assert(UseI != UserInst->op_end() && "cannot find IV operand"); 2811 if (IVIncSet.count(UseI)) 2812 continue; 2813 2814 // Record the uses. 2815 LSRFixup &LF = getNewFixup(); 2816 LF.UserInst = UserInst; 2817 LF.OperandValToReplace = UI->getOperandValToReplace(); 2818 LF.PostIncLoops = UI->getPostIncLoops(); 2819 2820 LSRUse::KindType Kind = LSRUse::Basic; 2821 Type *AccessTy = 0; 2822 if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) { 2823 Kind = LSRUse::Address; 2824 AccessTy = getAccessType(LF.UserInst); 2825 } 2826 2827 const SCEV *S = IU.getExpr(*UI); 2828 2829 // Equality (== and !=) ICmps are special. 
We can rewrite (i == N) as 2830 // (N - i == 0), and this allows (N - i) to be the expression that we work 2831 // with rather than just N or i, so we can consider the register 2832 // requirements for both N and i at the same time. Limiting this code to 2833 // equality icmps is not a problem because all interesting loops use 2834 // equality icmps, thanks to IndVarSimplify. 2835 if (ICmpInst *CI = dyn_cast<ICmpInst>(LF.UserInst)) 2836 if (CI->isEquality()) { 2837 // Swap the operands if needed to put the OperandValToReplace on the 2838 // left, for consistency. 2839 Value *NV = CI->getOperand(1); 2840 if (NV == LF.OperandValToReplace) { 2841 CI->setOperand(1, CI->getOperand(0)); 2842 CI->setOperand(0, NV); 2843 NV = CI->getOperand(1); 2844 Changed = true; 2845 } 2846 2847 // x == y --> x - y == 0 2848 const SCEV *N = SE.getSCEV(NV); 2849 if (SE.isLoopInvariant(N, L) && isSafeToExpand(N)) { 2850 // S is normalized, so normalize N before folding it into S 2851 // to keep the result normalized. 2852 N = TransformForPostIncUse(Normalize, N, CI, 0, 2853 LF.PostIncLoops, SE, DT); 2854 Kind = LSRUse::ICmpZero; 2855 S = SE.getMinusSCEV(N, S); 2856 } 2857 2858 // -1 and the negations of all interesting strides (except the negation 2859 // of -1) are now also interesting. 2860 for (size_t i = 0, e = Factors.size(); i != e; ++i) 2861 if (Factors[i] != -1) 2862 Factors.insert(-(uint64_t)Factors[i]); 2863 Factors.insert(-1); 2864 } 2865 2866 // Set up the initial formula for this use. 2867 std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy); 2868 LF.LUIdx = P.first; 2869 LF.Offset = P.second; 2870 LSRUse &LU = Uses[LF.LUIdx]; 2871 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); 2872 if (!LU.WidestFixupType || 2873 SE.getTypeSizeInBits(LU.WidestFixupType) < 2874 SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) 2875 LU.WidestFixupType = LF.OperandValToReplace->getType(); 2876 2877 // If this is the first use of this LSRUse, give it a formula. 2878 if (LU.Formulae.empty()) { 2879 InsertInitialFormula(S, LU, LF.LUIdx); 2880 CountRegisters(LU.Formulae.back(), LF.LUIdx); 2881 } 2882 } 2883 2884 DEBUG(print_fixups(dbgs())); 2885 } 2886 2887 /// InsertInitialFormula - Insert a formula for the given expression into 2888 /// the given use, separating out loop-variant portions from loop-invariant 2889 /// and loop-computable portions. 2890 void 2891 LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) { 2892 Formula F; 2893 F.InitialMatch(S, L, SE); 2894 bool Inserted = InsertFormula(LU, LUIdx, F); 2895 assert(Inserted && "Initial formula already exists!"); (void)Inserted; 2896 } 2897 2898 /// InsertSupplementalFormula - Insert a simple single-register formula for 2899 /// the given expression into the given use. 2900 void 2901 LSRInstance::InsertSupplementalFormula(const SCEV *S, 2902 LSRUse &LU, size_t LUIdx) { 2903 Formula F; 2904 F.BaseRegs.push_back(S); 2905 F.HasBaseReg = true; 2906 bool Inserted = InsertFormula(LU, LUIdx, F); 2907 assert(Inserted && "Supplemental formula already exists!"); (void)Inserted; 2908 } 2909 2910 /// CountRegisters - Note which registers are used by the given formula, 2911 /// updating RegUses. 
2912 void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) { 2913 if (F.ScaledReg) 2914 RegUses.CountRegister(F.ScaledReg, LUIdx); 2915 for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(), 2916 E = F.BaseRegs.end(); I != E; ++I) 2917 RegUses.CountRegister(*I, LUIdx); 2918 } 2919 2920 /// InsertFormula - If the given formula has not yet been inserted, add it to 2921 /// the list, and return true. Return false otherwise. 2922 bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) { 2923 if (!LU.InsertFormula(F)) 2924 return false; 2925 2926 CountRegisters(F, LUIdx); 2927 return true; 2928 } 2929 2930 /// CollectLoopInvariantFixupsAndFormulae - Check for other uses of 2931 /// loop-invariant values which we're tracking. These other uses will pin these 2932 /// values in registers, making them less profitable for elimination. 2933 /// TODO: This currently misses non-constant addrec step registers. 2934 /// TODO: Should this give more weight to users inside the loop? 2935 void 2936 LSRInstance::CollectLoopInvariantFixupsAndFormulae() { 2937 SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end()); 2938 SmallPtrSet<const SCEV *, 8> Inserted; 2939 2940 while (!Worklist.empty()) { 2941 const SCEV *S = Worklist.pop_back_val(); 2942 2943 if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) 2944 Worklist.append(N->op_begin(), N->op_end()); 2945 else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) 2946 Worklist.push_back(C->getOperand()); 2947 else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) { 2948 Worklist.push_back(D->getLHS()); 2949 Worklist.push_back(D->getRHS()); 2950 } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 2951 if (!Inserted.insert(U)) continue; 2952 const Value *V = U->getValue(); 2953 if (const Instruction *Inst = dyn_cast<Instruction>(V)) { 2954 // Look for instructions defined outside the loop. 2955 if (L->contains(Inst)) continue; 2956 } else if (isa<UndefValue>(V)) 2957 // Undef doesn't have a live range, so it doesn't matter. 2958 continue; 2959 for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end(); 2960 UI != UE; ++UI) { 2961 const Instruction *UserInst = dyn_cast<Instruction>(*UI); 2962 // Ignore non-instructions. 2963 if (!UserInst) 2964 continue; 2965 // Ignore instructions in other functions (as can happen with 2966 // Constants). 2967 if (UserInst->getParent()->getParent() != L->getHeader()->getParent()) 2968 continue; 2969 // Ignore instructions not dominated by the loop. 2970 const BasicBlock *UseBB = !isa<PHINode>(UserInst) ? 2971 UserInst->getParent() : 2972 cast<PHINode>(UserInst)->getIncomingBlock( 2973 PHINode::getIncomingValueNumForOperand(UI.getOperandNo())); 2974 if (!DT.dominates(L->getHeader(), UseBB)) 2975 continue; 2976 // Ignore uses which are part of other SCEV expressions, to avoid 2977 // analyzing them multiple times. 2978 if (SE.isSCEVable(UserInst->getType())) { 2979 const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst)); 2980 // If the user is a no-op, look through to its uses. 2981 if (!isa<SCEVUnknown>(UserS)) 2982 continue; 2983 if (UserS == U) { 2984 Worklist.push_back( 2985 SE.getUnknown(const_cast<Instruction *>(UserInst))); 2986 continue; 2987 } 2988 } 2989 // Ignore icmp instructions which are already being analyzed. 
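        // (If the compare's other operand evolves with this loop, the compare
        // is presumably already covered by the IV user collection above, so
        // adding a fixup here would duplicate work.)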
2990 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) { 2991 unsigned OtherIdx = !UI.getOperandNo(); 2992 Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx)); 2993 if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L)) 2994 continue; 2995 } 2996 2997 LSRFixup &LF = getNewFixup(); 2998 LF.UserInst = const_cast<Instruction *>(UserInst); 2999 LF.OperandValToReplace = UI.getUse(); 3000 std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, 0); 3001 LF.LUIdx = P.first; 3002 LF.Offset = P.second; 3003 LSRUse &LU = Uses[LF.LUIdx]; 3004 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); 3005 if (!LU.WidestFixupType || 3006 SE.getTypeSizeInBits(LU.WidestFixupType) < 3007 SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) 3008 LU.WidestFixupType = LF.OperandValToReplace->getType(); 3009 InsertSupplementalFormula(U, LU, LF.LUIdx); 3010 CountRegisters(LU.Formulae.back(), Uses.size() - 1); 3011 break; 3012 } 3013 } 3014 } 3015 } 3016 3017 /// CollectSubexprs - Split S into subexpressions which can be pulled out into 3018 /// separate registers. If C is non-null, multiply each subexpression by C. 3019 /// 3020 /// Return remainder expression after factoring the subexpressions captured by 3021 /// Ops. If Ops is complete, return NULL. 3022 static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C, 3023 SmallVectorImpl<const SCEV *> &Ops, 3024 const Loop *L, 3025 ScalarEvolution &SE, 3026 unsigned Depth = 0) { 3027 // Arbitrarily cap recursion to protect compile time. 3028 if (Depth >= 3) 3029 return S; 3030 3031 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 3032 // Break out add operands. 3033 for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end(); 3034 I != E; ++I) { 3035 const SCEV *Remainder = CollectSubexprs(*I, C, Ops, L, SE, Depth+1); 3036 if (Remainder) 3037 Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder); 3038 } 3039 return NULL; 3040 } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { 3041 // Split a non-zero base out of an addrec. 3042 if (AR->getStart()->isZero()) 3043 return S; 3044 3045 const SCEV *Remainder = CollectSubexprs(AR->getStart(), 3046 C, Ops, L, SE, Depth+1); 3047 // Split the non-zero AddRec unless it is part of a nested recurrence that 3048 // does not pertain to this loop. 3049 if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) { 3050 Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder); 3051 Remainder = NULL; 3052 } 3053 if (Remainder != AR->getStart()) { 3054 if (!Remainder) 3055 Remainder = SE.getConstant(AR->getType(), 0); 3056 return SE.getAddRecExpr(Remainder, 3057 AR->getStepRecurrence(SE), 3058 AR->getLoop(), 3059 //FIXME: AR->getNoWrapFlags(SCEV::FlagNW) 3060 SCEV::FlagAnyWrap); 3061 } 3062 } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 3063 // Break (C * (a + b + c)) into C*a + C*b + C*c. 3064 if (Mul->getNumOperands() != 2) 3065 return S; 3066 if (const SCEVConstant *Op0 = 3067 dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3068 C = C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0; 3069 const SCEV *Remainder = 3070 CollectSubexprs(Mul->getOperand(1), C, Ops, L, SE, Depth+1); 3071 if (Remainder) 3072 Ops.push_back(SE.getMulExpr(C, Remainder)); 3073 return NULL; 3074 } 3075 } 3076 return S; 3077 } 3078 3079 /// GenerateReassociations - Split out subexpressions from adds and the bases of 3080 /// addrecs. 
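/// For example, a base register of {(%a + %b),+,8} can be re-expressed as the
/// pair of registers {%a,+,8} and %b, which may expose %b for reuse with other
/// uses and formulae.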
3081 void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx, 3082 Formula Base, 3083 unsigned Depth) { 3084 // Arbitrarily cap recursion to protect compile time. 3085 if (Depth >= 3) return; 3086 3087 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) { 3088 const SCEV *BaseReg = Base.BaseRegs[i]; 3089 3090 SmallVector<const SCEV *, 8> AddOps; 3091 const SCEV *Remainder = CollectSubexprs(BaseReg, 0, AddOps, L, SE); 3092 if (Remainder) 3093 AddOps.push_back(Remainder); 3094 3095 if (AddOps.size() == 1) continue; 3096 3097 for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(), 3098 JE = AddOps.end(); J != JE; ++J) { 3099 3100 // Loop-variant "unknown" values are uninteresting; we won't be able to 3101 // do anything meaningful with them. 3102 if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L)) 3103 continue; 3104 3105 // Don't pull a constant into a register if the constant could be folded 3106 // into an immediate field. 3107 if (isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind, 3108 LU.AccessTy, *J, Base.getNumRegs() > 1)) 3109 continue; 3110 3111 // Collect all operands except *J. 3112 SmallVector<const SCEV *, 8> InnerAddOps 3113 (((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J); 3114 InnerAddOps.append 3115 (llvm::next(J), ((const SmallVector<const SCEV *, 8> &)AddOps).end()); 3116 3117 // Don't leave just a constant behind in a register if the constant could 3118 // be folded into an immediate field. 3119 if (InnerAddOps.size() == 1 && 3120 isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind, 3121 LU.AccessTy, InnerAddOps[0], Base.getNumRegs() > 1)) 3122 continue; 3123 3124 const SCEV *InnerSum = SE.getAddExpr(InnerAddOps); 3125 if (InnerSum->isZero()) 3126 continue; 3127 Formula F = Base; 3128 3129 // Add the remaining pieces of the add back into the new formula. 3130 const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum); 3131 if (InnerSumSC && 3132 SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 && 3133 TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset + 3134 InnerSumSC->getValue()->getZExtValue())) { 3135 F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset + 3136 InnerSumSC->getValue()->getZExtValue(); 3137 F.BaseRegs.erase(F.BaseRegs.begin() + i); 3138 } else 3139 F.BaseRegs[i] = InnerSum; 3140 3141 // Add J as its own register, or an unfolded immediate. 3142 const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J); 3143 if (SC && SE.getTypeSizeInBits(SC->getType()) <= 64 && 3144 TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset + 3145 SC->getValue()->getZExtValue())) 3146 F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset + 3147 SC->getValue()->getZExtValue(); 3148 else 3149 F.BaseRegs.push_back(*J); 3150 3151 if (InsertFormula(LU, LUIdx, F)) 3152 // If that formula hadn't been seen before, recurse to find more like 3153 // it. 3154 GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth+1); 3155 } 3156 } 3157 } 3158 3159 /// GenerateCombinations - Generate a formula consisting of all of the 3160 /// loop-dominating registers added into a single register. 3161 void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx, 3162 Formula Base) { 3163 // This method is only interesting on a plurality of registers. 
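  // Illustrative example (not tied to any particular target): if a formula
  // has loop-invariant base registers A and B that both dominate the loop
  // header, they can be combined into a single register holding (A + B),
  // freeing a register for the rest of the formula.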
3164   if (Base.BaseRegs.size() <= 1) return;
3165
3166   Formula F = Base;
3167   F.BaseRegs.clear();
3168   SmallVector<const SCEV *, 4> Ops;
3169   for (SmallVectorImpl<const SCEV *>::const_iterator
3170        I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) {
3171     const SCEV *BaseReg = *I;
3172     if (SE.properlyDominates(BaseReg, L->getHeader()) &&
3173         !SE.hasComputableLoopEvolution(BaseReg, L))
3174       Ops.push_back(BaseReg);
3175     else
3176       F.BaseRegs.push_back(BaseReg);
3177   }
3178   if (Ops.size() > 1) {
3179     const SCEV *Sum = SE.getAddExpr(Ops);
3180     // TODO: If Sum is zero, it probably means ScalarEvolution missed an
3181     // opportunity to fold something. For now, just ignore such cases
3182     // rather than proceed with zero in a register.
3183     if (!Sum->isZero()) {
3184       F.BaseRegs.push_back(Sum);
3185       (void)InsertFormula(LU, LUIdx, F);
3186     }
3187   }
3188 }
3189
3190 /// GenerateSymbolicOffsets - Generate reuse formulae using symbolic offsets.
3191 void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
3192                                           Formula Base) {
3193   // We can't add a symbolic offset if the address already contains one.
3194   if (Base.BaseGV) return;
3195
3196   for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
3197     const SCEV *G = Base.BaseRegs[i];
3198     GlobalValue *GV = ExtractSymbol(G, SE);
3199     if (G->isZero() || !GV)
3200       continue;
3201     Formula F = Base;
3202     F.BaseGV = GV;
3203     if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
3204       continue;
3205     F.BaseRegs[i] = G;
3206     (void)InsertFormula(LU, LUIdx, F);
3207   }
3208 }
3209
3210 /// GenerateConstantOffsets - Generate reuse formulae using constant offsets.
3211 void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
3212                                           Formula Base) {
3213   // TODO: For now, just add the min and max offset, because it usually isn't
3214   // worthwhile looking at everything in between.
3215   SmallVector<int64_t, 2> Worklist;
3216   Worklist.push_back(LU.MinOffset);
3217   if (LU.MaxOffset != LU.MinOffset)
3218     Worklist.push_back(LU.MaxOffset);
3219
3220   for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
3221     const SCEV *G = Base.BaseRegs[i];
3222
3223     for (SmallVectorImpl<int64_t>::const_iterator I = Worklist.begin(),
3224          E = Worklist.end(); I != E; ++I) {
3225       Formula F = Base;
3226       F.BaseOffset = (uint64_t)Base.BaseOffset - *I;
3227       if (isLegalUse(TTI, LU.MinOffset - *I, LU.MaxOffset - *I, LU.Kind,
3228                      LU.AccessTy, F)) {
3229         // Add the offset to the base register.
3230         const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G);
3231         // If it cancelled out, drop the base register, otherwise update it.
3232         if (NewG->isZero()) {
3233           std::swap(F.BaseRegs[i], F.BaseRegs.back());
3234           F.BaseRegs.pop_back();
3235         } else
3236           F.BaseRegs[i] = NewG;
3237
3238         (void)InsertFormula(LU, LUIdx, F);
3239       }
3240     }
3241
3242     int64_t Imm = ExtractImmediate(G, SE);
3243     if (G->isZero() || Imm == 0)
3244       continue;
3245     Formula F = Base;
3246     F.BaseOffset = (uint64_t)F.BaseOffset + Imm;
3247     if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
3248       continue;
3249     F.BaseRegs[i] = G;
3250     (void)InsertFormula(LU, LUIdx, F);
3251   }
3252 }
3253
3254 /// GenerateICmpZeroScales - For ICmpZero, check to see if we can scale up
3255 /// the comparison. For example, x == y -> x*c == y*c.
3256 void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
3257                                          Formula Base) {
3258   if (LU.Kind != LSRUse::ICmpZero) return;
3259
3260   // Determine the integer type for the base formula.
3261 Type *IntTy = Base.getType(); 3262 if (!IntTy) return; 3263 if (SE.getTypeSizeInBits(IntTy) > 64) return; 3264 3265 // Don't do this if there is more than one offset. 3266 if (LU.MinOffset != LU.MaxOffset) return; 3267 3268 assert(!Base.BaseGV && "ICmpZero use is not legal!"); 3269 3270 // Check each interesting stride. 3271 for (SmallSetVector<int64_t, 8>::const_iterator 3272 I = Factors.begin(), E = Factors.end(); I != E; ++I) { 3273 int64_t Factor = *I; 3274 3275 // Check that the multiplication doesn't overflow. 3276 if (Base.BaseOffset == INT64_MIN && Factor == -1) 3277 continue; 3278 int64_t NewBaseOffset = (uint64_t)Base.BaseOffset * Factor; 3279 if (NewBaseOffset / Factor != Base.BaseOffset) 3280 continue; 3281 3282 // Check that multiplying with the use offset doesn't overflow. 3283 int64_t Offset = LU.MinOffset; 3284 if (Offset == INT64_MIN && Factor == -1) 3285 continue; 3286 Offset = (uint64_t)Offset * Factor; 3287 if (Offset / Factor != LU.MinOffset) 3288 continue; 3289 3290 Formula F = Base; 3291 F.BaseOffset = NewBaseOffset; 3292 3293 // Check that this scale is legal. 3294 if (!isLegalUse(TTI, Offset, Offset, LU.Kind, LU.AccessTy, F)) 3295 continue; 3296 3297 // Compensate for the use having MinOffset built into it. 3298 F.BaseOffset = (uint64_t)F.BaseOffset + Offset - LU.MinOffset; 3299 3300 const SCEV *FactorS = SE.getConstant(IntTy, Factor); 3301 3302 // Check that multiplying with each base register doesn't overflow. 3303 for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) { 3304 F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS); 3305 if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i]) 3306 goto next; 3307 } 3308 3309 // Check that multiplying with the scaled register doesn't overflow. 3310 if (F.ScaledReg) { 3311 F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS); 3312 if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg) 3313 continue; 3314 } 3315 3316 // Check that multiplying with the unfolded offset doesn't overflow. 3317 if (F.UnfoldedOffset != 0) { 3318 if (F.UnfoldedOffset == INT64_MIN && Factor == -1) 3319 continue; 3320 F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor; 3321 if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset) 3322 continue; 3323 } 3324 3325 // If we make it here and it's legal, add it. 3326 (void)InsertFormula(LU, LUIdx, F); 3327 next:; 3328 } 3329 } 3330 3331 /// GenerateScales - Generate stride factor reuse formulae by making use of 3332 /// scaled-offset address modes, for example. 3333 void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) { 3334 // Determine the integer type for the base formula. 3335 Type *IntTy = Base.getType(); 3336 if (!IntTy) return; 3337 3338 // If this Formula already has a scaled register, we can't add another one. 3339 if (Base.Scale != 0) return; 3340 3341 // Check each interesting stride. 3342 for (SmallSetVector<int64_t, 8>::const_iterator 3343 I = Factors.begin(), E = Factors.end(); I != E; ++I) { 3344 int64_t Factor = *I; 3345 3346 Base.Scale = Factor; 3347 Base.HasBaseReg = Base.BaseRegs.size() > 1; 3348 // Check whether this scale is going to be legal. 3349 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, 3350 Base)) { 3351 // As a special-case, handle special out-of-loop Basic users specially. 3352 // TODO: Reconsider this special case. 
3353 if (LU.Kind == LSRUse::Basic && 3354 isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LSRUse::Special, 3355 LU.AccessTy, Base) && 3356 LU.AllFixupsOutsideLoop) 3357 LU.Kind = LSRUse::Special; 3358 else 3359 continue; 3360 } 3361 // For an ICmpZero, negating a solitary base register won't lead to 3362 // new solutions. 3363 if (LU.Kind == LSRUse::ICmpZero && 3364 !Base.HasBaseReg && Base.BaseOffset == 0 && !Base.BaseGV) 3365 continue; 3366 // For each addrec base reg, apply the scale, if possible. 3367 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) 3368 if (const SCEVAddRecExpr *AR = 3369 dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i])) { 3370 const SCEV *FactorS = SE.getConstant(IntTy, Factor); 3371 if (FactorS->isZero()) 3372 continue; 3373 // Divide out the factor, ignoring high bits, since we'll be 3374 // scaling the value back up in the end. 3375 if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) { 3376 // TODO: This could be optimized to avoid all the copying. 3377 Formula F = Base; 3378 F.ScaledReg = Quotient; 3379 F.DeleteBaseReg(F.BaseRegs[i]); 3380 (void)InsertFormula(LU, LUIdx, F); 3381 } 3382 } 3383 } 3384 } 3385 3386 /// GenerateTruncates - Generate reuse formulae from different IV types. 3387 void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) { 3388 // Don't bother truncating symbolic values. 3389 if (Base.BaseGV) return; 3390 3391 // Determine the integer type for the base formula. 3392 Type *DstTy = Base.getType(); 3393 if (!DstTy) return; 3394 DstTy = SE.getEffectiveSCEVType(DstTy); 3395 3396 for (SmallSetVector<Type *, 4>::const_iterator 3397 I = Types.begin(), E = Types.end(); I != E; ++I) { 3398 Type *SrcTy = *I; 3399 if (SrcTy != DstTy && TTI.isTruncateFree(SrcTy, DstTy)) { 3400 Formula F = Base; 3401 3402 if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I); 3403 for (SmallVectorImpl<const SCEV *>::iterator J = F.BaseRegs.begin(), 3404 JE = F.BaseRegs.end(); J != JE; ++J) 3405 *J = SE.getAnyExtendExpr(*J, SrcTy); 3406 3407 // TODO: This assumes we've done basic processing on all uses and 3408 // have an idea what the register usage is. 3409 if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses)) 3410 continue; 3411 3412 (void)InsertFormula(LU, LUIdx, F); 3413 } 3414 } 3415 } 3416 3417 namespace { 3418 3419 /// WorkItem - Helper class for GenerateCrossUseConstantOffsets. It's used to 3420 /// defer modifications so that the search phase doesn't have to worry about 3421 /// the data structures moving underneath it. 3422 struct WorkItem { 3423 size_t LUIdx; 3424 int64_t Imm; 3425 const SCEV *OrigReg; 3426 3427 WorkItem(size_t LI, int64_t I, const SCEV *R) 3428 : LUIdx(LI), Imm(I), OrigReg(R) {} 3429 3430 void print(raw_ostream &OS) const; 3431 void dump() const; 3432 }; 3433 3434 } 3435 3436 void WorkItem::print(raw_ostream &OS) const { 3437 OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx 3438 << " , add offset " << Imm; 3439 } 3440 3441 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 3442 void WorkItem::dump() const { 3443 print(errs()); errs() << '\n'; 3444 } 3445 #endif 3446 3447 /// GenerateCrossUseConstantOffsets - Look for registers which are a constant 3448 /// distance apart and try to form reuse opportunities between them. 3449 void LSRInstance::GenerateCrossUseConstantOffsets() { 3450 // Group the registers by their value without any added constant offset. 
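  // For example (illustrative), the registers A, (A + 4), and (A + 8) all
  // share the stripped base value A, and are recorded under it with the
  // offsets 0, 4, and 8.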
3451   typedef std::map<int64_t, const SCEV *> ImmMapTy;
3452   typedef DenseMap<const SCEV *, ImmMapTy> RegMapTy;
3453   RegMapTy Map;
3454   DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
3455   SmallVector<const SCEV *, 8> Sequence;
3456   for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
3457        I != E; ++I) {
3458     const SCEV *Reg = *I;
3459     int64_t Imm = ExtractImmediate(Reg, SE);
3460     std::pair<RegMapTy::iterator, bool> Pair =
3461       Map.insert(std::make_pair(Reg, ImmMapTy()));
3462     if (Pair.second)
3463       Sequence.push_back(Reg);
3464     Pair.first->second.insert(std::make_pair(Imm, *I));
3465     UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(*I);
3466   }
3467
3468   // Now examine each set of registers with the same base value. Build up
3469   // a list of work to do and do the work in a separate step so that we're
3470   // not adding formulae and register counts while we're searching.
3471   SmallVector<WorkItem, 32> WorkItems;
3472   SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
3473   for (SmallVectorImpl<const SCEV *>::const_iterator I = Sequence.begin(),
3474        E = Sequence.end(); I != E; ++I) {
3475     const SCEV *Reg = *I;
3476     const ImmMapTy &Imms = Map.find(Reg)->second;
3477
3478     // It's not worthwhile looking for reuse if there's only one offset.
3479     if (Imms.size() == 1)
3480       continue;
3481
3482     DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
3483           for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
3484                J != JE; ++J)
3485             dbgs() << ' ' << J->first;
3486           dbgs() << '\n');
3487
3488     // Examine each offset.
3489     for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
3490          J != JE; ++J) {
3491       const SCEV *OrigReg = J->second;
3492
3493       int64_t JImm = J->first;
3494       const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);
3495
3496       if (!isa<SCEVConstant>(OrigReg) &&
3497           UsedByIndicesMap[Reg].count() == 1) {
3498         DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg << '\n');
3499         continue;
3500       }
3501
3502       // Conservatively examine offsets between this orig reg and a few
3503       // selected other orig regs.
3504       ImmMapTy::const_iterator OtherImms[] = {
3505         Imms.begin(), prior(Imms.end()),
3506         Imms.lower_bound((Imms.begin()->first + prior(Imms.end())->first) / 2)
3507       };
3508       for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
3509         ImmMapTy::const_iterator M = OtherImms[i];
3510         if (M == J || M == JE) continue;
3511
3512         // Compute the difference between the two.
3513         int64_t Imm = (uint64_t)JImm - M->first;
3514         for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1;
3515              LUIdx = UsedByIndices.find_next(LUIdx))
3516           // Make a memo of this use, offset, and register tuple.
3517           if (UniqueItems.insert(std::make_pair(LUIdx, Imm)))
3518             WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
3519       }
3520     }
3521   }
3522
3523   Map.clear();
3524   Sequence.clear();
3525   UsedByIndicesMap.clear();
3526   UniqueItems.clear();
3527
3528   // Now iterate through the worklist and add new formulae.
3529   for (SmallVectorImpl<WorkItem>::const_iterator I = WorkItems.begin(),
3530        E = WorkItems.end(); I != E; ++I) {
3531     const WorkItem &WI = *I;
3532     size_t LUIdx = WI.LUIdx;
3533     LSRUse &LU = Uses[LUIdx];
3534     int64_t Imm = WI.Imm;
3535     const SCEV *OrigReg = WI.OrigReg;
3536
3537     Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
3538     const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
3539     unsigned BitWidth = SE.getTypeSizeInBits(IntTy);
3540
3541     // TODO: Use a more targeted data structure.
3542 for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) { 3543 const Formula &F = LU.Formulae[L]; 3544 // Use the immediate in the scaled register. 3545 if (F.ScaledReg == OrigReg) { 3546 int64_t Offset = (uint64_t)F.BaseOffset + Imm * (uint64_t)F.Scale; 3547 // Don't create 50 + reg(-50). 3548 if (F.referencesReg(SE.getSCEV( 3549 ConstantInt::get(IntTy, -(uint64_t)Offset)))) 3550 continue; 3551 Formula NewF = F; 3552 NewF.BaseOffset = Offset; 3553 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, 3554 NewF)) 3555 continue; 3556 NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg); 3557 3558 // If the new scale is a constant in a register, and adding the constant 3559 // value to the immediate would produce a value closer to zero than the 3560 // immediate itself, then the formula isn't worthwhile. 3561 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg)) 3562 if (C->getValue()->isNegative() != 3563 (NewF.BaseOffset < 0) && 3564 (C->getValue()->getValue().abs() * APInt(BitWidth, F.Scale)) 3565 .ule(abs64(NewF.BaseOffset))) 3566 continue; 3567 3568 // OK, looks good. 3569 (void)InsertFormula(LU, LUIdx, NewF); 3570 } else { 3571 // Use the immediate in a base register. 3572 for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) { 3573 const SCEV *BaseReg = F.BaseRegs[N]; 3574 if (BaseReg != OrigReg) 3575 continue; 3576 Formula NewF = F; 3577 NewF.BaseOffset = (uint64_t)NewF.BaseOffset + Imm; 3578 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, 3579 LU.Kind, LU.AccessTy, NewF)) { 3580 if (!TTI.isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm)) 3581 continue; 3582 NewF = F; 3583 NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm; 3584 } 3585 NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg); 3586 3587 // If the new formula has a constant in a register, and adding the 3588 // constant value to the immediate would produce a value closer to 3589 // zero than the immediate itself, then the formula isn't worthwhile. 3590 for (SmallVectorImpl<const SCEV *>::const_iterator 3591 J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end(); 3592 J != JE; ++J) 3593 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J)) 3594 if ((C->getValue()->getValue() + NewF.BaseOffset).abs().slt( 3595 abs64(NewF.BaseOffset)) && 3596 (C->getValue()->getValue() + 3597 NewF.BaseOffset).countTrailingZeros() >= 3598 CountTrailingZeros_64(NewF.BaseOffset)) 3599 goto skip_formula; 3600 3601 // Ok, looks good. 3602 (void)InsertFormula(LU, LUIdx, NewF); 3603 break; 3604 skip_formula:; 3605 } 3606 } 3607 } 3608 } 3609 } 3610 3611 /// GenerateAllReuseFormulae - Generate formulae for each use. 3612 void 3613 LSRInstance::GenerateAllReuseFormulae() { 3614 // This is split into multiple loops so that hasRegsUsedByUsesOtherThan 3615 // queries are more precise. 
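  // In particular, GenerateTruncates consults hasRegsUsedByUsesOtherThan, so
  // it runs in a later group, after the earlier generators have registered
  // the registers they introduce for every use.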
3616 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 3617 LSRUse &LU = Uses[LUIdx]; 3618 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 3619 GenerateReassociations(LU, LUIdx, LU.Formulae[i]); 3620 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 3621 GenerateCombinations(LU, LUIdx, LU.Formulae[i]); 3622 } 3623 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 3624 LSRUse &LU = Uses[LUIdx]; 3625 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 3626 GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]); 3627 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 3628 GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]); 3629 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 3630 GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]); 3631 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 3632 GenerateScales(LU, LUIdx, LU.Formulae[i]); 3633 } 3634 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 3635 LSRUse &LU = Uses[LUIdx]; 3636 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 3637 GenerateTruncates(LU, LUIdx, LU.Formulae[i]); 3638 } 3639 3640 GenerateCrossUseConstantOffsets(); 3641 3642 DEBUG(dbgs() << "\n" 3643 "After generating reuse formulae:\n"; 3644 print_uses(dbgs())); 3645 } 3646 3647 /// If there are multiple formulae with the same set of registers used 3648 /// by other uses, pick the best one and delete the others. 3649 void LSRInstance::FilterOutUndesirableDedicatedRegisters() { 3650 DenseSet<const SCEV *> VisitedRegs; 3651 SmallPtrSet<const SCEV *, 16> Regs; 3652 SmallPtrSet<const SCEV *, 16> LoserRegs; 3653 #ifndef NDEBUG 3654 bool ChangedFormulae = false; 3655 #endif 3656 3657 // Collect the best formula for each unique set of shared registers. This 3658 // is reset for each use. 3659 typedef DenseMap<SmallVector<const SCEV *, 2>, size_t, UniquifierDenseMapInfo> 3660 BestFormulaeTy; 3661 BestFormulaeTy BestFormulae; 3662 3663 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 3664 LSRUse &LU = Uses[LUIdx]; 3665 DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n'); 3666 3667 bool Any = false; 3668 for (size_t FIdx = 0, NumForms = LU.Formulae.size(); 3669 FIdx != NumForms; ++FIdx) { 3670 Formula &F = LU.Formulae[FIdx]; 3671 3672 // Some formulas are instant losers. For example, they may depend on 3673 // nonexistent AddRecs from other loops. These need to be filtered 3674 // immediately, otherwise heuristics could choose them over others leading 3675 // to an unsatisfactory solution. Passing LoserRegs into RateFormula here 3676 // avoids the need to recompute this information across formulae using the 3677 // same bad AddRec. Passing LoserRegs is also essential unless we remove 3678 // the corresponding bad register from the Regs set. 3679 Cost CostF; 3680 Regs.clear(); 3681 CostF.RateFormula(F, Regs, VisitedRegs, L, LU.Offsets, SE, DT, 3682 &LoserRegs); 3683 if (CostF.isLoser()) { 3684 // During initial formula generation, undesirable formulae are generated 3685 // by uses within other loops that have some non-trivial address mode or 3686 // use the postinc form of the IV. LSR needs to provide these formulae 3687 // as the basis of rediscovering the desired formula that uses an AddRec 3688 // corresponding to the existing phi. Once all formulae have been 3689 // generated, these initial losers may be pruned. 
3690 DEBUG(dbgs() << " Filtering loser "; F.print(dbgs()); 3691 dbgs() << "\n"); 3692 } 3693 else { 3694 SmallVector<const SCEV *, 2> Key; 3695 for (SmallVectorImpl<const SCEV *>::const_iterator J = F.BaseRegs.begin(), 3696 JE = F.BaseRegs.end(); J != JE; ++J) { 3697 const SCEV *Reg = *J; 3698 if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx)) 3699 Key.push_back(Reg); 3700 } 3701 if (F.ScaledReg && 3702 RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx)) 3703 Key.push_back(F.ScaledReg); 3704 // Unstable sort by host order ok, because this is only used for 3705 // uniquifying. 3706 std::sort(Key.begin(), Key.end()); 3707 3708 std::pair<BestFormulaeTy::const_iterator, bool> P = 3709 BestFormulae.insert(std::make_pair(Key, FIdx)); 3710 if (P.second) 3711 continue; 3712 3713 Formula &Best = LU.Formulae[P.first->second]; 3714 3715 Cost CostBest; 3716 Regs.clear(); 3717 CostBest.RateFormula(Best, Regs, VisitedRegs, L, LU.Offsets, SE, DT); 3718 if (CostF < CostBest) 3719 std::swap(F, Best); 3720 DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs()); 3721 dbgs() << "\n" 3722 " in favor of formula "; Best.print(dbgs()); 3723 dbgs() << '\n'); 3724 } 3725 #ifndef NDEBUG 3726 ChangedFormulae = true; 3727 #endif 3728 LU.DeleteFormula(F); 3729 --FIdx; 3730 --NumForms; 3731 Any = true; 3732 } 3733 3734 // Now that we've filtered out some formulae, recompute the Regs set. 3735 if (Any) 3736 LU.RecomputeRegs(LUIdx, RegUses); 3737 3738 // Reset this to prepare for the next use. 3739 BestFormulae.clear(); 3740 } 3741 3742 DEBUG(if (ChangedFormulae) { 3743 dbgs() << "\n" 3744 "After filtering out undesirable candidates:\n"; 3745 print_uses(dbgs()); 3746 }); 3747 } 3748 3749 // This is a rough guess that seems to work fairly well. 3750 static const size_t ComplexityLimit = UINT16_MAX; 3751 3752 /// EstimateSearchSpaceComplexity - Estimate the worst-case number of 3753 /// solutions the solver might have to consider. It almost never considers 3754 /// this many solutions because it prune the search space, but the pruning 3755 /// isn't always sufficient. 3756 size_t LSRInstance::EstimateSearchSpaceComplexity() const { 3757 size_t Power = 1; 3758 for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(), 3759 E = Uses.end(); I != E; ++I) { 3760 size_t FSize = I->Formulae.size(); 3761 if (FSize >= ComplexityLimit) { 3762 Power = ComplexityLimit; 3763 break; 3764 } 3765 Power *= FSize; 3766 if (Power >= ComplexityLimit) 3767 break; 3768 } 3769 return Power; 3770 } 3771 3772 /// NarrowSearchSpaceByDetectingSupersets - When one formula uses a superset 3773 /// of the registers of another formula, it won't help reduce register 3774 /// pressure (though it may not necessarily hurt register pressure); remove 3775 /// it to simplify the system. 3776 void LSRInstance::NarrowSearchSpaceByDetectingSupersets() { 3777 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { 3778 DEBUG(dbgs() << "The search space is too complex.\n"); 3779 3780 DEBUG(dbgs() << "Narrowing the search space by eliminating formulae " 3781 "which use a superset of registers used by other " 3782 "formulae.\n"); 3783 3784 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 3785 LSRUse &LU = Uses[LUIdx]; 3786 bool Any = false; 3787 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { 3788 Formula &F = LU.Formulae[i]; 3789 // Look for a formula with a constant or GV in a register. If the use 3790 // also has a formula with that same value in an immediate field, 3791 // delete the one that uses a register. 
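        // For example (illustrative), a formula with base registers {A, 8}
        // uses a superset of the registers of a formula reg(A) + imm(8); if
        // the latter already exists for this use, the former is deleted.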
3792 for (SmallVectorImpl<const SCEV *>::const_iterator 3793 I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) { 3794 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) { 3795 Formula NewF = F; 3796 NewF.BaseOffset += C->getValue()->getSExtValue(); 3797 NewF.BaseRegs.erase(NewF.BaseRegs.begin() + 3798 (I - F.BaseRegs.begin())); 3799 if (LU.HasFormulaWithSameRegs(NewF)) { 3800 DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); 3801 LU.DeleteFormula(F); 3802 --i; 3803 --e; 3804 Any = true; 3805 break; 3806 } 3807 } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) { 3808 if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) 3809 if (!F.BaseGV) { 3810 Formula NewF = F; 3811 NewF.BaseGV = GV; 3812 NewF.BaseRegs.erase(NewF.BaseRegs.begin() + 3813 (I - F.BaseRegs.begin())); 3814 if (LU.HasFormulaWithSameRegs(NewF)) { 3815 DEBUG(dbgs() << " Deleting "; F.print(dbgs()); 3816 dbgs() << '\n'); 3817 LU.DeleteFormula(F); 3818 --i; 3819 --e; 3820 Any = true; 3821 break; 3822 } 3823 } 3824 } 3825 } 3826 } 3827 if (Any) 3828 LU.RecomputeRegs(LUIdx, RegUses); 3829 } 3830 3831 DEBUG(dbgs() << "After pre-selection:\n"; 3832 print_uses(dbgs())); 3833 } 3834 } 3835 3836 /// NarrowSearchSpaceByCollapsingUnrolledCode - When there are many registers 3837 /// for expressions like A, A+1, A+2, etc., allocate a single register for 3838 /// them. 3839 void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() { 3840 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { 3841 DEBUG(dbgs() << "The search space is too complex.\n"); 3842 3843 DEBUG(dbgs() << "Narrowing the search space by assuming that uses " 3844 "separated by a constant offset will use the same " 3845 "registers.\n"); 3846 3847 // This is especially useful for unrolled loops. 3848 3849 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 3850 LSRUse &LU = Uses[LUIdx]; 3851 for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(), 3852 E = LU.Formulae.end(); I != E; ++I) { 3853 const Formula &F = *I; 3854 if (F.BaseOffset != 0 && F.Scale == 0) { 3855 if (LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU)) { 3856 if (reconcileNewOffset(*LUThatHas, F.BaseOffset, 3857 /*HasBaseReg=*/false, 3858 LU.Kind, LU.AccessTy)) { 3859 DEBUG(dbgs() << " Deleting use "; LU.print(dbgs()); 3860 dbgs() << '\n'); 3861 3862 LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop; 3863 3864 // Update the relocs to reference the new use. 3865 for (SmallVectorImpl<LSRFixup>::iterator I = Fixups.begin(), 3866 E = Fixups.end(); I != E; ++I) { 3867 LSRFixup &Fixup = *I; 3868 if (Fixup.LUIdx == LUIdx) { 3869 Fixup.LUIdx = LUThatHas - &Uses.front(); 3870 Fixup.Offset += F.BaseOffset; 3871 // Add the new offset to LUThatHas' offset list. 3872 if (LUThatHas->Offsets.back() != Fixup.Offset) { 3873 LUThatHas->Offsets.push_back(Fixup.Offset); 3874 if (Fixup.Offset > LUThatHas->MaxOffset) 3875 LUThatHas->MaxOffset = Fixup.Offset; 3876 if (Fixup.Offset < LUThatHas->MinOffset) 3877 LUThatHas->MinOffset = Fixup.Offset; 3878 } 3879 DEBUG(dbgs() << "New fixup has offset " 3880 << Fixup.Offset << '\n'); 3881 } 3882 if (Fixup.LUIdx == NumUses-1) 3883 Fixup.LUIdx = LUIdx; 3884 } 3885 3886 // Delete formulae from the new use which are no longer legal. 
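            // (For example, the merged use's offset range may now be wider
            // than what a particular formula's addressing mode can fold.)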
3887 bool Any = false; 3888 for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) { 3889 Formula &F = LUThatHas->Formulae[i]; 3890 if (!isLegalUse(TTI, LUThatHas->MinOffset, LUThatHas->MaxOffset, 3891 LUThatHas->Kind, LUThatHas->AccessTy, F)) { 3892 DEBUG(dbgs() << " Deleting "; F.print(dbgs()); 3893 dbgs() << '\n'); 3894 LUThatHas->DeleteFormula(F); 3895 --i; 3896 --e; 3897 Any = true; 3898 } 3899 } 3900 if (Any) 3901 LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses); 3902 3903 // Delete the old use. 3904 DeleteUse(LU, LUIdx); 3905 --LUIdx; 3906 --NumUses; 3907 break; 3908 } 3909 } 3910 } 3911 } 3912 } 3913 3914 DEBUG(dbgs() << "After pre-selection:\n"; 3915 print_uses(dbgs())); 3916 } 3917 } 3918 3919 /// NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters - Call 3920 /// FilterOutUndesirableDedicatedRegisters again, if necessary, now that 3921 /// we've done more filtering, as it may be able to find more formulae to 3922 /// eliminate. 3923 void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){ 3924 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { 3925 DEBUG(dbgs() << "The search space is too complex.\n"); 3926 3927 DEBUG(dbgs() << "Narrowing the search space by re-filtering out " 3928 "undesirable dedicated registers.\n"); 3929 3930 FilterOutUndesirableDedicatedRegisters(); 3931 3932 DEBUG(dbgs() << "After pre-selection:\n"; 3933 print_uses(dbgs())); 3934 } 3935 } 3936 3937 /// NarrowSearchSpaceByPickingWinnerRegs - Pick a register which seems likely 3938 /// to be profitable, and then in any use which has any reference to that 3939 /// register, delete all formulae which do not reference that register. 3940 void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() { 3941 // With all other options exhausted, loop until the system is simple 3942 // enough to handle. 3943 SmallPtrSet<const SCEV *, 4> Taken; 3944 while (EstimateSearchSpaceComplexity() >= ComplexityLimit) { 3945 // Ok, we have too many of formulae on our hands to conveniently handle. 3946 // Use a rough heuristic to thin out the list. 3947 DEBUG(dbgs() << "The search space is too complex.\n"); 3948 3949 // Pick the register which is used by the most LSRUses, which is likely 3950 // to be a good reuse register candidate. 3951 const SCEV *Best = 0; 3952 unsigned BestNum = 0; 3953 for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end(); 3954 I != E; ++I) { 3955 const SCEV *Reg = *I; 3956 if (Taken.count(Reg)) 3957 continue; 3958 if (!Best) 3959 Best = Reg; 3960 else { 3961 unsigned Count = RegUses.getUsedByIndices(Reg).count(); 3962 if (Count > BestNum) { 3963 Best = Reg; 3964 BestNum = Count; 3965 } 3966 } 3967 } 3968 3969 DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best 3970 << " will yield profitable reuse.\n"); 3971 Taken.insert(Best); 3972 3973 // In any use with formulae which references this register, delete formulae 3974 // which don't reference it. 3975 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 3976 LSRUse &LU = Uses[LUIdx]; 3977 if (!LU.Regs.count(Best)) continue; 3978 3979 bool Any = false; 3980 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { 3981 Formula &F = LU.Formulae[i]; 3982 if (!F.referencesReg(Best)) { 3983 DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); 3984 LU.DeleteFormula(F); 3985 --e; 3986 --i; 3987 Any = true; 3988 assert(e != 0 && "Use has no formulae left! 
Is Regs inconsistent?"); 3989 continue; 3990 } 3991 } 3992 3993 if (Any) 3994 LU.RecomputeRegs(LUIdx, RegUses); 3995 } 3996 3997 DEBUG(dbgs() << "After pre-selection:\n"; 3998 print_uses(dbgs())); 3999 } 4000 } 4001 4002 /// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of 4003 /// formulae to choose from, use some rough heuristics to prune down the number 4004 /// of formulae. This keeps the main solver from taking an extraordinary amount 4005 /// of time in some worst-case scenarios. 4006 void LSRInstance::NarrowSearchSpaceUsingHeuristics() { 4007 NarrowSearchSpaceByDetectingSupersets(); 4008 NarrowSearchSpaceByCollapsingUnrolledCode(); 4009 NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(); 4010 NarrowSearchSpaceByPickingWinnerRegs(); 4011 } 4012 4013 /// SolveRecurse - This is the recursive solver. 4014 void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution, 4015 Cost &SolutionCost, 4016 SmallVectorImpl<const Formula *> &Workspace, 4017 const Cost &CurCost, 4018 const SmallPtrSet<const SCEV *, 16> &CurRegs, 4019 DenseSet<const SCEV *> &VisitedRegs) const { 4020 // Some ideas: 4021 // - prune more: 4022 // - use more aggressive filtering 4023 // - sort the formula so that the most profitable solutions are found first 4024 // - sort the uses too 4025 // - search faster: 4026 // - don't compute a cost, and then compare. compare while computing a cost 4027 // and bail early. 4028 // - track register sets with SmallBitVector 4029 4030 const LSRUse &LU = Uses[Workspace.size()]; 4031 4032 // If this use references any register that's already a part of the 4033 // in-progress solution, consider it a requirement that a formula must 4034 // reference that register in order to be considered. This prunes out 4035 // unprofitable searching. 4036 SmallSetVector<const SCEV *, 4> ReqRegs; 4037 for (SmallPtrSet<const SCEV *, 16>::const_iterator I = CurRegs.begin(), 4038 E = CurRegs.end(); I != E; ++I) 4039 if (LU.Regs.count(*I)) 4040 ReqRegs.insert(*I); 4041 4042 SmallPtrSet<const SCEV *, 16> NewRegs; 4043 Cost NewCost; 4044 for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(), 4045 E = LU.Formulae.end(); I != E; ++I) { 4046 const Formula &F = *I; 4047 4048 // Ignore formulae which do not use any of the required registers. 4049 bool SatisfiedReqReg = true; 4050 for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(), 4051 JE = ReqRegs.end(); J != JE; ++J) { 4052 const SCEV *Reg = *J; 4053 if ((!F.ScaledReg || F.ScaledReg != Reg) && 4054 std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) == 4055 F.BaseRegs.end()) { 4056 SatisfiedReqReg = false; 4057 break; 4058 } 4059 } 4060 if (!SatisfiedReqReg) { 4061 // If none of the formulae satisfied the required registers, then we could 4062 // clear ReqRegs and try again. Currently, we simply give up in this case. 4063 continue; 4064 } 4065 4066 // Evaluate the cost of the current formula. If it's already worse than 4067 // the current best, prune the search at that point. 4068 NewCost = CurCost; 4069 NewRegs = CurRegs; 4070 NewCost.RateFormula(F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT); 4071 if (NewCost < SolutionCost) { 4072 Workspace.push_back(&F); 4073 if (Workspace.size() != Uses.size()) { 4074 SolveRecurse(Solution, SolutionCost, Workspace, NewCost, 4075 NewRegs, VisitedRegs); 4076 if (F.getNumRegs() == 1 && Workspace.size() == 1) 4077 VisitedRegs.insert(F.ScaledReg ? 
F.ScaledReg : F.BaseRegs[0]); 4078 } else { 4079 DEBUG(dbgs() << "New best at "; NewCost.print(dbgs()); 4080 dbgs() << ".\n Regs:"; 4081 for (SmallPtrSet<const SCEV *, 16>::const_iterator 4082 I = NewRegs.begin(), E = NewRegs.end(); I != E; ++I) 4083 dbgs() << ' ' << **I; 4084 dbgs() << '\n'); 4085 4086 SolutionCost = NewCost; 4087 Solution = Workspace; 4088 } 4089 Workspace.pop_back(); 4090 } 4091 } 4092 } 4093 4094 /// Solve - Choose one formula from each use. Return the results in the given 4095 /// Solution vector. 4096 void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const { 4097 SmallVector<const Formula *, 8> Workspace; 4098 Cost SolutionCost; 4099 SolutionCost.Loose(); 4100 Cost CurCost; 4101 SmallPtrSet<const SCEV *, 16> CurRegs; 4102 DenseSet<const SCEV *> VisitedRegs; 4103 Workspace.reserve(Uses.size()); 4104 4105 // SolveRecurse does all the work. 4106 SolveRecurse(Solution, SolutionCost, Workspace, CurCost, 4107 CurRegs, VisitedRegs); 4108 if (Solution.empty()) { 4109 DEBUG(dbgs() << "\nNo Satisfactory Solution\n"); 4110 return; 4111 } 4112 4113 // Ok, we've now made all our decisions. 4114 DEBUG(dbgs() << "\n" 4115 "The chosen solution requires "; SolutionCost.print(dbgs()); 4116 dbgs() << ":\n"; 4117 for (size_t i = 0, e = Uses.size(); i != e; ++i) { 4118 dbgs() << " "; 4119 Uses[i].print(dbgs()); 4120 dbgs() << "\n" 4121 " "; 4122 Solution[i]->print(dbgs()); 4123 dbgs() << '\n'; 4124 }); 4125 4126 assert(Solution.size() == Uses.size() && "Malformed solution!"); 4127 } 4128 4129 /// HoistInsertPosition - Helper for AdjustInsertPositionForExpand. Climb up 4130 /// the dominator tree far as we can go while still being dominated by the 4131 /// input positions. This helps canonicalize the insert position, which 4132 /// encourages sharing. 4133 BasicBlock::iterator 4134 LSRInstance::HoistInsertPosition(BasicBlock::iterator IP, 4135 const SmallVectorImpl<Instruction *> &Inputs) 4136 const { 4137 for (;;) { 4138 const Loop *IPLoop = LI.getLoopFor(IP->getParent()); 4139 unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0; 4140 4141 BasicBlock *IDom; 4142 for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) { 4143 if (!Rung) return IP; 4144 Rung = Rung->getIDom(); 4145 if (!Rung) return IP; 4146 IDom = Rung->getBlock(); 4147 4148 // Don't climb into a loop though. 4149 const Loop *IDomLoop = LI.getLoopFor(IDom); 4150 unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0; 4151 if (IDomDepth <= IPLoopDepth && 4152 (IDomDepth != IPLoopDepth || IDomLoop == IPLoop)) 4153 break; 4154 } 4155 4156 bool AllDominate = true; 4157 Instruction *BetterPos = 0; 4158 Instruction *Tentative = IDom->getTerminator(); 4159 for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(), 4160 E = Inputs.end(); I != E; ++I) { 4161 Instruction *Inst = *I; 4162 if (Inst == Tentative || !DT.dominates(Inst, Tentative)) { 4163 AllDominate = false; 4164 break; 4165 } 4166 // Attempt to find an insert position in the middle of the block, 4167 // instead of at the end, so that it can be used for other expansions. 4168 if (IDom == Inst->getParent() && 4169 (!BetterPos || !DT.dominates(Inst, BetterPos))) 4170 BetterPos = llvm::next(BasicBlock::iterator(Inst)); 4171 } 4172 if (!AllDominate) 4173 break; 4174 if (BetterPos) 4175 IP = BetterPos; 4176 else 4177 IP = Tentative; 4178 } 4179 4180 return IP; 4181 } 4182 4183 /// AdjustInsertPositionForExpand - Determine an input position which will be 4184 /// dominated by the operands and which will dominate the result. 
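/// For example (illustrative), when several fixups expand the same base
/// expression, hoisting each expansion to the same canonical position lets
/// SCEVExpander reuse previously inserted instructions instead of emitting
/// duplicates.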
4185 BasicBlock::iterator
4186 LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
4187                                            const LSRFixup &LF,
4188                                            const LSRUse &LU,
4189                                            SCEVExpander &Rewriter) const {
4190   // Collect some instructions which must be dominated by the
4191   // expanding replacement. These must be dominated by any operands that
4192   // will be required in the expansion.
4193   SmallVector<Instruction *, 4> Inputs;
4194   if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
4195     Inputs.push_back(I);
4196   if (LU.Kind == LSRUse::ICmpZero)
4197     if (Instruction *I =
4198           dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
4199       Inputs.push_back(I);
4200   if (LF.PostIncLoops.count(L)) {
4201     if (LF.isUseFullyOutsideLoop(L))
4202       Inputs.push_back(L->getLoopLatch()->getTerminator());
4203     else
4204       Inputs.push_back(IVIncInsertPos);
4205   }
4206   // The expansion must also be dominated by the increment positions of any
4207   // loops for which it is using post-inc mode.
4208   for (PostIncLoopSet::const_iterator I = LF.PostIncLoops.begin(),
4209        E = LF.PostIncLoops.end(); I != E; ++I) {
4210     const Loop *PIL = *I;
4211     if (PIL == L) continue;
4212
4213     // Be dominated by the loop exit.
4214     SmallVector<BasicBlock *, 4> ExitingBlocks;
4215     PIL->getExitingBlocks(ExitingBlocks);
4216     if (!ExitingBlocks.empty()) {
4217       BasicBlock *BB = ExitingBlocks[0];
4218       for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i)
4219         BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]);
4220       Inputs.push_back(BB->getTerminator());
4221     }
4222   }
4223
4224   assert(!isa<PHINode>(LowestIP) && !isa<LandingPadInst>(LowestIP)
4225          && !isa<DbgInfoIntrinsic>(LowestIP) &&
4226          "Insertion point must be a normal instruction");
4227
4228   // Then, climb up the immediate dominator tree as far as we can go while
4229   // still being dominated by the input positions.
4230   BasicBlock::iterator IP = HoistInsertPosition(LowestIP, Inputs);
4231
4232   // Don't insert instructions before PHI nodes.
4233   while (isa<PHINode>(IP)) ++IP;
4234
4235   // Ignore landingpad instructions.
4236   while (isa<LandingPadInst>(IP)) ++IP;
4237
4238   // Ignore debug intrinsics.
4239   while (isa<DbgInfoIntrinsic>(IP)) ++IP;
4240
4241   // Set IP below instructions recently inserted by SCEVExpander. This keeps the
4242   // IP consistent across expansions and allows the previously inserted
4243   // instructions to be reused by subsequent expansion.
4244   while (Rewriter.isInsertedInstruction(IP) && IP != LowestIP) ++IP;
4245
4246   return IP;
4247 }
4248
4249 /// Expand - Emit instructions for the leading candidate expression for this
4250 /// LSRUse (this is called "expanding").
4251 Value *LSRInstance::Expand(const LSRFixup &LF,
4252                            const Formula &F,
4253                            BasicBlock::iterator IP,
4254                            SCEVExpander &Rewriter,
4255                            SmallVectorImpl<WeakVH> &DeadInsts) const {
4256   const LSRUse &LU = Uses[LF.LUIdx];
4257
4258   // Determine an input position which will be dominated by the operands and
4259   // which will dominate the result.
4260   IP = AdjustInsertPositionForExpand(IP, LF, LU, Rewriter);
4261
4262   // Inform the Rewriter if we have a post-increment use, so that it can
4263   // perform an advantageous expansion.
4264   Rewriter.setPostInc(LF.PostIncLoops);
4265
4266   // This is the type that the user actually needs.
4267   Type *OpTy = LF.OperandValToReplace->getType();
4268   // This will be the type that we'll initially expand to.
4269   Type *Ty = F.getType();
4270   if (!Ty)
4271     // No type known; just expand directly to the ultimate type.
4272 Ty = OpTy; 4273 else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy)) 4274 // Expand directly to the ultimate type if it's the right size. 4275 Ty = OpTy; 4276 // This is the type to do integer arithmetic in. 4277 Type *IntTy = SE.getEffectiveSCEVType(Ty); 4278 4279 // Build up a list of operands to add together to form the full base. 4280 SmallVector<const SCEV *, 8> Ops; 4281 4282 // Expand the BaseRegs portion. 4283 for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(), 4284 E = F.BaseRegs.end(); I != E; ++I) { 4285 const SCEV *Reg = *I; 4286 assert(!Reg->isZero() && "Zero allocated in a base register!"); 4287 4288 // If we're expanding for a post-inc user, make the post-inc adjustment. 4289 PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops); 4290 Reg = TransformForPostIncUse(Denormalize, Reg, 4291 LF.UserInst, LF.OperandValToReplace, 4292 Loops, SE, DT); 4293 4294 Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP))); 4295 } 4296 4297 // Expand the ScaledReg portion. 4298 Value *ICmpScaledV = 0; 4299 if (F.Scale != 0) { 4300 const SCEV *ScaledS = F.ScaledReg; 4301 4302 // If we're expanding for a post-inc user, make the post-inc adjustment. 4303 PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops); 4304 ScaledS = TransformForPostIncUse(Denormalize, ScaledS, 4305 LF.UserInst, LF.OperandValToReplace, 4306 Loops, SE, DT); 4307 4308 if (LU.Kind == LSRUse::ICmpZero) { 4309 // An interesting way of "folding" with an icmp is to use a negated 4310 // scale, which we'll implement by inserting it into the other operand 4311 // of the icmp. 4312 assert(F.Scale == -1 && 4313 "The only scale supported by ICmpZero uses is -1!"); 4314 ICmpScaledV = Rewriter.expandCodeFor(ScaledS, 0, IP); 4315 } else { 4316 // Otherwise just expand the scaled register and an explicit scale, 4317 // which is expected to be matched as part of the address. 4318 4319 // Flush the operand list to suppress SCEVExpander hoisting address modes. 4320 if (!Ops.empty() && LU.Kind == LSRUse::Address) { 4321 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP); 4322 Ops.clear(); 4323 Ops.push_back(SE.getUnknown(FullV)); 4324 } 4325 ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP)); 4326 ScaledS = SE.getMulExpr(ScaledS, 4327 SE.getConstant(ScaledS->getType(), F.Scale)); 4328 Ops.push_back(ScaledS); 4329 } 4330 } 4331 4332 // Expand the GV portion. 4333 if (F.BaseGV) { 4334 // Flush the operand list to suppress SCEVExpander hoisting. 4335 if (!Ops.empty()) { 4336 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP); 4337 Ops.clear(); 4338 Ops.push_back(SE.getUnknown(FullV)); 4339 } 4340 Ops.push_back(SE.getUnknown(F.BaseGV)); 4341 } 4342 4343 // Flush the operand list to suppress SCEVExpander hoisting of both folded and 4344 // unfolded offsets. LSR assumes they both live next to their uses. 4345 if (!Ops.empty()) { 4346 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP); 4347 Ops.clear(); 4348 Ops.push_back(SE.getUnknown(FullV)); 4349 } 4350 4351 // Expand the immediate portion. 4352 int64_t Offset = (uint64_t)F.BaseOffset + LF.Offset; 4353 if (Offset != 0) { 4354 if (LU.Kind == LSRUse::ICmpZero) { 4355 // The other interesting way of "folding" with an ICmpZero is to use a 4356 // negated immediate. 
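      // For example (illustrative), instead of materializing (x + 4) and
      // comparing it against zero, compare x directly against -4.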
4357 if (!ICmpScaledV) 4358 ICmpScaledV = ConstantInt::get(IntTy, -(uint64_t)Offset); 4359 else { 4360 Ops.push_back(SE.getUnknown(ICmpScaledV)); 4361 ICmpScaledV = ConstantInt::get(IntTy, Offset); 4362 } 4363 } else { 4364 // Just add the immediate values. These again are expected to be matched 4365 // as part of the address. 4366 Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset))); 4367 } 4368 } 4369 4370 // Expand the unfolded offset portion. 4371 int64_t UnfoldedOffset = F.UnfoldedOffset; 4372 if (UnfoldedOffset != 0) { 4373 // Just add the immediate values. 4374 Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, 4375 UnfoldedOffset))); 4376 } 4377 4378 // Emit instructions summing all the operands. 4379 const SCEV *FullS = Ops.empty() ? 4380 SE.getConstant(IntTy, 0) : 4381 SE.getAddExpr(Ops); 4382 Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP); 4383 4384 // We're done expanding now, so reset the rewriter. 4385 Rewriter.clearPostInc(); 4386 4387 // An ICmpZero Formula represents an ICmp which we're handling as a 4388 // comparison against zero. Now that we've expanded an expression for that 4389 // form, update the ICmp's other operand. 4390 if (LU.Kind == LSRUse::ICmpZero) { 4391 ICmpInst *CI = cast<ICmpInst>(LF.UserInst); 4392 DeadInsts.push_back(CI->getOperand(1)); 4393 assert(!F.BaseGV && "ICmp does not support folding a global value and " 4394 "a scale at the same time!"); 4395 if (F.Scale == -1) { 4396 if (ICmpScaledV->getType() != OpTy) { 4397 Instruction *Cast = 4398 CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false, 4399 OpTy, false), 4400 ICmpScaledV, OpTy, "tmp", CI); 4401 ICmpScaledV = Cast; 4402 } 4403 CI->setOperand(1, ICmpScaledV); 4404 } else { 4405 assert(F.Scale == 0 && 4406 "ICmp does not support folding a global value and " 4407 "a scale at the same time!"); 4408 Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy), 4409 -(uint64_t)Offset); 4410 if (C->getType() != OpTy) 4411 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 4412 OpTy, false), 4413 C, OpTy); 4414 4415 CI->setOperand(1, C); 4416 } 4417 } 4418 4419 return FullV; 4420 } 4421 4422 /// RewriteForPHI - Helper for Rewrite. PHI nodes are special because the use 4423 /// of their operands effectively happens in their predecessor blocks, so the 4424 /// expression may need to be expanded in multiple places. 4425 void LSRInstance::RewriteForPHI(PHINode *PN, 4426 const LSRFixup &LF, 4427 const Formula &F, 4428 SCEVExpander &Rewriter, 4429 SmallVectorImpl<WeakVH> &DeadInsts, 4430 Pass *P) const { 4431 DenseMap<BasicBlock *, Value *> Inserted; 4432 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 4433 if (PN->getIncomingValue(i) == LF.OperandValToReplace) { 4434 BasicBlock *BB = PN->getIncomingBlock(i); 4435 4436 // If this is a critical edge, split the edge so that we do not insert 4437 // the code on all predecessor/successor paths. We do this unless this 4438 // is the canonical backedge for this loop, which complicates post-inc 4439 // users. 4440 if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 && 4441 !isa<IndirectBrInst>(BB->getTerminator())) { 4442 BasicBlock *Parent = PN->getParent(); 4443 Loop *PNLoop = LI.getLoopFor(Parent); 4444 if (!PNLoop || Parent != PNLoop->getHeader()) { 4445 // Split the critical edge. 
4446 BasicBlock *NewBB = 0; 4447 if (!Parent->isLandingPad()) { 4448 NewBB = SplitCriticalEdge(BB, Parent, P, 4449 /*MergeIdenticalEdges=*/true, 4450 /*DontDeleteUselessPhis=*/true); 4451 } else { 4452 SmallVector<BasicBlock*, 2> NewBBs; 4453 SplitLandingPadPredecessors(Parent, BB, "", "", P, NewBBs); 4454 NewBB = NewBBs[0]; 4455 } 4456 // If NewBB==NULL, then SplitCriticalEdge refused to split because all 4457 // phi predecessors are identical. The simple thing to do is skip 4458 // splitting in this case rather than complicate the API. 4459 if (NewBB) { 4460 // If PN is outside of the loop and BB is in the loop, we want to 4461 // move the block to be immediately before the PHI block, not 4462 // immediately after BB. 4463 if (L->contains(BB) && !L->contains(PN)) 4464 NewBB->moveBefore(PN->getParent()); 4465 4466 // Splitting the edge can reduce the number of PHI entries we have. 4467 e = PN->getNumIncomingValues(); 4468 BB = NewBB; 4469 i = PN->getBasicBlockIndex(BB); 4470 } 4471 } 4472 } 4473 4474 std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair = 4475 Inserted.insert(std::make_pair(BB, static_cast<Value *>(0))); 4476 if (!Pair.second) 4477 PN->setIncomingValue(i, Pair.first->second); 4478 else { 4479 Value *FullV = Expand(LF, F, BB->getTerminator(), Rewriter, DeadInsts); 4480 4481 // If this is reuse-by-noop-cast, insert the noop cast. 4482 Type *OpTy = LF.OperandValToReplace->getType(); 4483 if (FullV->getType() != OpTy) 4484 FullV = 4485 CastInst::Create(CastInst::getCastOpcode(FullV, false, 4486 OpTy, false), 4487 FullV, LF.OperandValToReplace->getType(), 4488 "tmp", BB->getTerminator()); 4489 4490 PN->setIncomingValue(i, FullV); 4491 Pair.first->second = FullV; 4492 } 4493 } 4494 } 4495 4496 /// Rewrite - Emit instructions for the leading candidate expression for this 4497 /// LSRUse (this is called "expanding"), and update the UserInst to reference 4498 /// the newly expanded value. 4499 void LSRInstance::Rewrite(const LSRFixup &LF, 4500 const Formula &F, 4501 SCEVExpander &Rewriter, 4502 SmallVectorImpl<WeakVH> &DeadInsts, 4503 Pass *P) const { 4504 // First, find an insertion point that dominates UserInst. For PHI nodes, 4505 // find the nearest block which dominates all the relevant uses. 4506 if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) { 4507 RewriteForPHI(PN, LF, F, Rewriter, DeadInsts, P); 4508 } else { 4509 Value *FullV = Expand(LF, F, LF.UserInst, Rewriter, DeadInsts); 4510 4511 // If this is reuse-by-noop-cast, insert the noop cast. 4512 Type *OpTy = LF.OperandValToReplace->getType(); 4513 if (FullV->getType() != OpTy) { 4514 Instruction *Cast = 4515 CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false), 4516 FullV, OpTy, "tmp", LF.UserInst); 4517 FullV = Cast; 4518 } 4519 4520 // Update the user. ICmpZero is handled specially here (for now) because 4521 // Expand may have updated one of the operands of the icmp already, and 4522 // its new value may happen to be equal to LF.OperandValToReplace, in 4523 // which case doing replaceUsesOfWith leads to replacing both operands 4524 // with the same value. TODO: Reorganize this. 4525 if (Uses[LF.LUIdx].Kind == LSRUse::ICmpZero) 4526 LF.UserInst->setOperand(0, FullV); 4527 else 4528 LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV); 4529 } 4530 4531 DeadInsts.push_back(LF.OperandValToReplace); 4532 } 4533 4534 /// ImplementSolution - Rewrite all the fixup locations with new values, 4535 /// following the chosen solution. 
4536 void 4537 LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution, 4538 Pass *P) { 4539 // Keep track of instructions we may have made dead, so that 4540 // we can remove them after we are done working. 4541 SmallVector<WeakVH, 16> DeadInsts; 4542 4543 SCEVExpander Rewriter(SE, "lsr"); 4544 #ifndef NDEBUG 4545 Rewriter.setDebugType(DEBUG_TYPE); 4546 #endif 4547 Rewriter.disableCanonicalMode(); 4548 Rewriter.enableLSRMode(); 4549 Rewriter.setIVIncInsertPos(L, IVIncInsertPos); 4550 4551 // Mark phi nodes that terminate chains so the expander tries to reuse them. 4552 for (SmallVectorImpl<IVChain>::const_iterator ChainI = IVChainVec.begin(), 4553 ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) { 4554 if (PHINode *PN = dyn_cast<PHINode>(ChainI->tailUserInst())) 4555 Rewriter.setChainedPhi(PN); 4556 } 4557 4558 // Expand the new value definitions and update the users. 4559 for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(), 4560 E = Fixups.end(); I != E; ++I) { 4561 const LSRFixup &Fixup = *I; 4562 4563 Rewrite(Fixup, *Solution[Fixup.LUIdx], Rewriter, DeadInsts, P); 4564 4565 Changed = true; 4566 } 4567 4568 for (SmallVectorImpl<IVChain>::const_iterator ChainI = IVChainVec.begin(), 4569 ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) { 4570 GenerateIVChain(*ChainI, Rewriter, DeadInsts); 4571 Changed = true; 4572 } 4573 // Clean up after ourselves. This must be done before deleting any 4574 // instructions. 4575 Rewriter.clear(); 4576 4577 Changed |= DeleteTriviallyDeadInstructions(DeadInsts); 4578 } 4579 4580 LSRInstance::LSRInstance(Loop *L, Pass *P) 4581 : IU(P->getAnalysis<IVUsers>()), SE(P->getAnalysis<ScalarEvolution>()), 4582 DT(P->getAnalysis<DominatorTree>()), LI(P->getAnalysis<LoopInfo>()), 4583 TTI(P->getAnalysis<TargetTransformInfo>()), L(L), Changed(false), 4584 IVIncInsertPos(0) { 4585 // If LoopSimplify form is not available, stay out of trouble. 4586 if (!L->isLoopSimplifyForm()) 4587 return; 4588 4589 // If there's no interesting work to be done, bail early. 4590 if (IU.empty()) return; 4591 4592 // If there's too much analysis to be done, bail early. We won't be able to 4593 // model the problem anyway. 4594 unsigned NumUsers = 0; 4595 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) { 4596 if (++NumUsers > MaxIVUsers) { 4597 DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << *L 4598 << "\n"); 4599 return; 4600 } 4601 } 4602 4603 #ifndef NDEBUG 4604 // All dominating loops must have preheaders, or SCEVExpander may not be able 4605 // to materialize an AddRecExpr whose Start is an outer AddRecExpr. 4606 // 4607 // IVUsers analysis should only create users that are dominated by simple loop 4608 // headers. Since this loop should dominate all of its users, its user list 4609 // should be empty if this loop itself is not within a simple loop nest. 4610 for (DomTreeNode *Rung = DT.getNode(L->getLoopPreheader()); 4611 Rung; Rung = Rung->getIDom()) { 4612 BasicBlock *BB = Rung->getBlock(); 4613 const Loop *DomLoop = LI.getLoopFor(BB); 4614 if (DomLoop && DomLoop->getHeader() == BB) { 4615 assert(DomLoop->getLoopPreheader() && "LSR needs a simplified loop nest"); 4616 } 4617 } 4618 #endif // DEBUG 4619 4620 DEBUG(dbgs() << "\nLSR on loop "; 4621 WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false); 4622 dbgs() << ":\n"); 4623 4624 // First, perform some low-level loop optimizations. 
  OptimizeShadowIV();
  OptimizeLoopTermCond();

  // If loop preparation eliminates all interesting IV users, bail.
  if (IU.empty()) return;

  // Skip nested loops until we can model them better with formulae.
  if (!L->empty()) {
    DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n");
    return;
  }

  // Start collecting data and preparing for the solver.
  CollectChains();
  CollectInterestingTypesAndFactors();
  CollectFixupsAndInitialFormulae();
  CollectLoopInvariantFixupsAndFormulae();

  assert(!Uses.empty() && "IVUsers reported at least one use");
  DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
        print_uses(dbgs()));

  // Now use the reuse data to generate a bunch of interesting ways
  // to formulate the values needed for the uses.
  GenerateAllReuseFormulae();

  FilterOutUndesirableDedicatedRegisters();
  NarrowSearchSpaceUsingHeuristics();

  SmallVector<const Formula *, 8> Solution;
  Solve(Solution);

  // Release memory that is no longer needed.
  Factors.clear();
  Types.clear();
  RegUses.clear();

  if (Solution.empty())
    return;

#ifndef NDEBUG
  // Formulae should be legal.
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(), E = Uses.end();
       I != E; ++I) {
    const LSRUse &LU = *I;
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end();
         J != JE; ++J)
      assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
                        *J) && "Illegal formula generated!");
  }
#endif

  // Now that we've decided what we want, make it so.
  ImplementSolution(Solution, P);
}

void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
  if (Factors.empty() && Types.empty()) return;

  OS << "LSR has identified the following interesting factors and types: ";
  bool First = true;

  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '*' << *I;
  }

  for (SmallSetVector<Type *, 4>::const_iterator
       I = Types.begin(), E = Types.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '(' << **I << ')';
  }
  OS << '\n';
}

void LSRInstance::print_fixups(raw_ostream &OS) const {
  OS << "LSR is examining the following fixup sites:\n";
  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
       E = Fixups.end(); I != E; ++I) {
    OS << "  ";
    I->print(OS);
    OS << '\n';
  }
}

void LSRInstance::print_uses(raw_ostream &OS) const {
  OS << "LSR is examining the following uses:\n";
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    OS << "  ";
    LU.print(OS);
    OS << '\n';
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J) {
      OS << "    ";
      J->print(OS);
      OS << '\n';
    }
  }
}

void LSRInstance::print(raw_ostream &OS) const {
  print_factors_and_types(OS);
  print_fixups(OS);
  print_uses(OS);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRInstance::dump() const {
  print(errs()); errs() << '\n';
}
#endif

namespace {
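
// The LoopStrengthReduce class below is the LoopPass wrapper that runs
// LSRInstance on each loop handed to it by the LPPassManager. It is
// registered under the name "loop-reduce"; as an illustration (exact driver
// flags may vary by build), the pass can be exercised in isolation with:
//
//   opt -loop-reduce -S input.ll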

class LoopStrengthReduce : public LoopPass {
public:
  static char ID; // Pass ID, replacement for typeid
  LoopStrengthReduce();

private:
  bool runOnLoop(Loop *L, LPPassManager &LPM);
  void getAnalysisUsage(AnalysisUsage &AU) const;
};

}

char LoopStrengthReduce::ID = 0;
INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
                      "Loop Strength Reduction", false, false)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(IVUsers)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
                    "Loop Strength Reduction", false, false)


Pass *llvm::createLoopStrengthReducePass() {
  return new LoopStrengthReduce();
}

LoopStrengthReduce::LoopStrengthReduce() : LoopPass(ID) {
  initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry());
}

void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
  // We split critical edges, so we change the CFG. However, we do update
  // many analyses when they are available.
  AU.addPreservedID(LoopSimplifyID);

  AU.addRequired<LoopInfo>();
  AU.addPreserved<LoopInfo>();
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<DominatorTree>();
  AU.addPreserved<DominatorTree>();
  AU.addRequired<ScalarEvolution>();
  AU.addPreserved<ScalarEvolution>();
  // Requiring LoopSimplify a second time here prevents IVUsers from running
  // twice, since LoopSimplify was invalidated by running ScalarEvolution.
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<IVUsers>();
  AU.addPreserved<IVUsers>();
  AU.addRequired<TargetTransformInfo>();
}

bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
  bool Changed = false;

  // Run the main LSR transformation.
  Changed |= LSRInstance(L, this).getChanged();

  // Remove any extra phis created by processing inner loops.
  Changed |= DeleteDeadPHIs(L->getHeader());
  if (EnablePhiElim && L->isLoopSimplifyForm()) {
    SmallVector<WeakVH, 16> DeadInsts;
    SCEVExpander Rewriter(getAnalysis<ScalarEvolution>(), "lsr");
#ifndef NDEBUG
    Rewriter.setDebugType(DEBUG_TYPE);
#endif
    unsigned numFolded =
      Rewriter.replaceCongruentIVs(L, &getAnalysis<DominatorTree>(),
                                   DeadInsts,
                                   &getAnalysis<TargetTransformInfo>());
    if (numFolded) {
      Changed = true;
      DeleteTriviallyDeadInstructions(DeadInsts);
      DeleteDeadPHIs(L->getHeader());
    }
  }
  return Changed;
}
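
// Note: createLoopStrengthReducePass() above is the public entry point for
// clients. As a rough sketch only (the surrounding setup such as target data
// and other passes is elided, and names like M are placeholders), a client
// using the legacy pass manager would schedule it along these lines:
//
//   PassManager PM;                          // legacy pass manager
//   PM.add(createLoopStrengthReducePass());
//   PM.run(*M);                              // M: the Module being optimized
//
// LSR itself then runs once per loop via runOnLoop().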