//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops
// that have as one or more of their components the loop induction variable;
// it rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>;
// however, it's useful to think about these as the same register, with some
// uses using the value of the register before the add and some using it
// after. In this example, the icmp is a post-increment user, since it uses
// %i.next, which is the value of the induction variable after the increment.
// The other common case of post-increment users is users outside the loop.
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should TargetLowering::AddrMode::BaseGV be changed to a ConstantExpr
//       instead of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
//       smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
//       multiple base registers, or as the increment expression in an addrec),
//       we may not actually need both reg and (-1 * reg) in registers; the
//       negation can be implemented by using a sub instead of an add. The
//       lack of support for taking this into consideration when making
//       register pressure decisions is partly worked around by the "Special"
//       use kind.
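//
//       For example (illustrative): rather than keeping both %x and
//       %x.neg = mul %x, -1 live to compute (%base + (-1 * %x)), a single
//       instruction would do:
//
//         %addr = sub %base, %x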
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

namespace llvm {
cl::opt<bool> EnableNested(
  "enable-lsr-nested", cl::Hidden, cl::desc("Enable LSR on nested loops"));

cl::opt<bool> EnableRetry(
  "enable-lsr-retry", cl::Hidden, cl::desc("Enable LSR retry"));
}

namespace {

/// RegSortData - This class holds data which is used to order reuse
/// candidates.
class RegSortData {
public:
  /// UsedByIndices - This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  RegSortData() {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// RegUseTracker - Map register candidates to information about how they are
/// used.
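/// RegSequence remembers candidates in the order they were first seen, so
/// iteration over the tracker is deterministic; RegUsesMap records, for each
/// candidate, the set of LSRUse indices that reference it.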
class RegUseTracker {
  typedef DenseMap<const SCEV *, RegSortData> RegUsesTy;

  RegUsesTy RegUsesMap;
  SmallVector<const SCEV *, 16> RegSequence;

public:
  void CountRegister(const SCEV *Reg, size_t LUIdx);
  void DropRegister(const SCEV *Reg, size_t LUIdx);
  void SwapAndDropUse(size_t LUIdx, size_t LastLUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;

  void clear();

  typedef SmallVectorImpl<const SCEV *>::iterator iterator;
  typedef SmallVectorImpl<const SCEV *>::const_iterator const_iterator;
  iterator begin() { return RegSequence.begin(); }
  iterator end() { return RegSequence.end(); }
  const_iterator begin() const { return RegSequence.begin(); }
  const_iterator end() const { return RegSequence.end(); }
};

}

void
RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair =
    RegUsesMap.insert(std::make_pair(Reg, RegSortData()));
  RegSortData &RSD = Pair.first->second;
  if (Pair.second)
    RegSequence.push_back(Reg);
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);
}

void
RegUseTracker::DropRegister(const SCEV *Reg, size_t LUIdx) {
  RegUsesTy::iterator It = RegUsesMap.find(Reg);
  assert(It != RegUsesMap.end());
  RegSortData &RSD = It->second;
  assert(RSD.UsedByIndices.size() > LUIdx);
  RSD.UsedByIndices.reset(LUIdx);
}

void
RegUseTracker::SwapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
  assert(LUIdx <= LastLUIdx);

  // Update RegUses. The data structure is not optimized for this purpose;
  // we must iterate through it and update each of the bit vectors.
  for (RegUsesTy::iterator I = RegUsesMap.begin(), E = RegUsesMap.end();
       I != E; ++I) {
    SmallBitVector &UsedByIndices = I->second.UsedByIndices;
    if (LUIdx < UsedByIndices.size())
      UsedByIndices[LUIdx] =
        LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : 0;
    UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx));
  }
}

bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  if (I == RegUsesMap.end())
    return false;
  const SmallBitVector &UsedByIndices = I->second.UsedByIndices;
  int i = UsedByIndices.find_first();
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;
  return UsedByIndices.find_next(i) != -1;
}

const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  assert(I != RegUsesMap.end() && "Unknown register!");
  return I->second.UsedByIndices;
}

void RegUseTracker::clear() {
  RegUsesMap.clear();
  RegSequence.clear();
}

namespace {

/// Formula - This class holds information that describes a formula for
/// computing a value satisfying a use. It may include broken-out immediates
/// and scaled registers.
struct Formula {
  /// AM - This is used to represent complex addressing, as well as other kinds
  /// of interesting uses.
  TargetLowering::AddrMode AM;

  /// BaseRegs - The list of "base" registers for this use. When this is
  /// non-empty, AM.HasBaseReg should be set to true.
  SmallVector<const SCEV *, 2> BaseRegs;

  /// ScaledReg - The 'scaled' register for this use. This should be non-null
  /// when AM.Scale is not zero.
  const SCEV *ScaledReg;

  /// UnfoldedOffset - An additional constant offset which is added near the
  /// use. This requires a temporary register, but the offset itself can
  /// live in an add immediate field rather than a register.
  int64_t UnfoldedOffset;

  Formula() : ScaledReg(0), UnfoldedOffset(0) {}

  void InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);

  unsigned getNumRegs() const;
  Type *getType() const;

  void DeleteBaseReg(const SCEV *&S);

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// DoInitialMatch - Recursion helper for InitialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE) {
  // Collect expressions which properly dominate the loop header.
  if (SE.properlyDominates(S, L->getHeader())) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      DoInitialMatch(*I, L, Good, Bad, SE);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
      DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                      AR->getStepRecurrence(SE),
                                      // FIXME: AR->getNoWrapFlags()
                                      AR->getLoop(), SCEV::FlagAnyWrap),
                     L, Good, Bad, SE);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getOperand(0)->isAllOnesValue()) {
      SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end());
      const SCEV *NewMul = SE.getMulExpr(Ops);

      SmallVector<const SCEV *, 4> MyGood;
      SmallVector<const SCEV *, 4> MyBad;
      DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
      const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
        SE.getEffectiveSCEVType(NewMul->getType())));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyGood.begin(),
           E = MyGood.end(); I != E; ++I)
        Good.push_back(SE.getMulExpr(NegOne, *I));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyBad.begin(),
           E = MyBad.end(); I != E; ++I)
        Bad.push_back(SE.getMulExpr(NegOne, *I));
      return;
    }

  // Ok, we can't do anything interesting. Just stuff the whole thing into a
  // register and hope for the best.
  Bad.push_back(S);
}

/// InitialMatch - Incorporate loop-variant parts of S into this Formula,
/// attempting to keep all loop-invariant and loop-computable values in a
/// single base register.
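/// For example (illustrative), matching (%gv + {0,+,4}<%L>) for a
/// loop-invariant %gv puts %gv in one base register and the loop-variant
/// addrec {0,+,4}<%L> in another, giving the formula
/// reg(%gv) + reg({0,+,4}<%L>).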
void Formula::InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
  SmallVector<const SCEV *, 4> Good;
  SmallVector<const SCEV *, 4> Bad;
  DoInitialMatch(S, L, Good, Bad, SE);
  if (!Good.empty()) {
    const SCEV *Sum = SE.getAddExpr(Good);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    AM.HasBaseReg = true;
  }
  if (!Bad.empty()) {
    const SCEV *Sum = SE.getAddExpr(Bad);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    AM.HasBaseReg = true;
  }
}

/// getNumRegs - Return the total number of register operands used by this
/// formula. This does not include register uses implied by non-constant
/// addrec strides.
unsigned Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}

/// getType - Return the type of this formula, if it has one, or null
/// otherwise. This type is meaningless except for the bit size.
Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         AM.BaseGV ? AM.BaseGV->getType() :
         0;
}

/// DeleteBaseReg - Delete the given base reg from the BaseRegs list.
void Formula::DeleteBaseReg(const SCEV *&S) {
  if (&S != &BaseRegs.back())
    std::swap(S, BaseRegs.back());
  BaseRegs.pop_back();
}

/// referencesReg - Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg ||
         std::find(BaseRegs.begin(), BaseRegs.end(), S) != BaseRegs.end();
}

/// hasRegsUsedByUsesOtherThan - Test whether this formula uses registers
/// which are used by uses other than the use with the given index.
bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
  if (ScaledReg)
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
      return true;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I)
    if (RegUses.isRegUsedByUsesOtherThan(*I, LUIdx))
      return true;
  return false;
}

void Formula::print(raw_ostream &OS) const {
  bool First = true;
  if (AM.BaseGV) {
    if (!First) OS << " + "; else First = false;
    WriteAsOperand(OS, AM.BaseGV, /*PrintType=*/false);
  }
  if (AM.BaseOffs != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.BaseOffs;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I) {
    if (!First) OS << " + "; else First = false;
    OS << "reg(" << **I << ')';
  }
  if (AM.HasBaseReg && BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: HasBaseReg**";
  } else if (!AM.HasBaseReg && !BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: !HasBaseReg**";
  }
  if (AM.Scale != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.Scale << "*reg(";
    if (ScaledReg)
      OS << *ScaledReg;
    else
      OS << "<unknown>";
    OS << ')';
  }
  if (UnfoldedOffset != 0) {
    if (!First) OS << " + "; else First = false;
    OS << "imm(" << UnfoldedOffset << ')';
  }
}

void Formula::dump() const {
  print(errs()); errs() << '\n';
}

/// isAddRecSExtable - Return true if the given addrec can be sign-extended
/// without changing its value.
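/// For example (illustrative), an i32 addrec {0,+,1} that is known not to
/// wrap keeps its value when evaluated one bit wider, so sign-extending it
/// yields another addrec rather than an opaque sext expression.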
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1);
  return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}

/// isAddSExtable - Return true if the given add can be sign-extended
/// without changing its value.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1);
  return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}

/// isMulSExtable - Return true if the given mul can be sign-extended
/// without changing its value.
static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(),
                     SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
  return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
}

/// getExactSDiv - Return an expression for LHS /s RHS, if it can be determined
/// and if the remainder is known to be zero, or null otherwise. If
/// IgnoreSignificantBits is true, expressions like (X * Y) /s Y are simplified
/// to Y, ignoring that the multiplication may overflow, which is useful when
/// the result will be used in a context where the most significant bits are
/// ignored.
static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
                                ScalarEvolution &SE,
                                bool IgnoreSignificantBits = false) {
  // Handle the trivial case, which works for any SCEV type.
  if (LHS == RHS)
    return SE.getConstant(LHS->getType(), 1);

  // Handle a few RHS special cases.
  const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
  if (RC) {
    const APInt &RA = RC->getValue()->getValue();
    // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
    // some folding.
    if (RA.isAllOnesValue())
      return SE.getMulExpr(LHS, RC);
    // Handle x /s 1 as x.
    if (RA == 1)
      return LHS;
  }

  // Check for a division of a constant by a constant.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
    if (!RC)
      return 0;
    const APInt &LA = C->getValue()->getValue();
    const APInt &RA = RC->getValue()->getValue();
    if (LA.srem(RA) != 0)
      return 0;
    return SE.getConstant(LA.sdiv(RA));
  }

  // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) {
      const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
                                      IgnoreSignificantBits);
      if (!Step) return 0;
      const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
                                       IgnoreSignificantBits);
      if (!Start) return 0;
      // FlagNW is independent of the start value, step direction, and is
      // preserved with smaller magnitude steps.
      // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
      return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap);
    }
    return 0;
  }

  // Distribute the sdiv over add operands, if the add doesn't overflow.
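  // For example (illustrative), ((2 * %a) + 4) /s 2 becomes (%a + 2),
  // provided each operand divides exactly.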
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
      SmallVector<const SCEV *, 8> Ops;
      for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
           I != E; ++I) {
        const SCEV *Op = getExactSDiv(*I, RHS, SE,
                                      IgnoreSignificantBits);
        if (!Op) return 0;
        Ops.push_back(Op);
      }
      return SE.getAddExpr(Ops);
    }
    return 0;
  }

  // Check for a multiply operand that we can pull RHS out of.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) {
    if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
      SmallVector<const SCEV *, 4> Ops;
      bool Found = false;
      for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end();
           I != E; ++I) {
        const SCEV *S = *I;
        if (!Found)
          if (const SCEV *Q = getExactSDiv(S, RHS, SE,
                                           IgnoreSignificantBits)) {
            S = Q;
            Found = true;
          }
        Ops.push_back(S);
      }
      return Found ? SE.getMulExpr(Ops) : 0;
    }
    return 0;
  }

  // Otherwise we don't know.
  return 0;
}

/// ExtractImmediate - If S involves the addition of a constant integer value,
/// return that integer value, and mutate S to point to a new SCEV with that
/// value excluded.
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    if (C->getValue()->getValue().getMinSignedBits() <= 64) {
      S = SE.getConstant(C->getType(), 0);
      return C->getValue()->getSExtValue();
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return 0;
}

/// ExtractSymbol - If S involves the addition of a GlobalValue address,
/// return that symbol, and mutate S to point to a new SCEV with that
/// value excluded.
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
      S = SE.getConstant(GV->getType(), 0);
      return GV;
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
    if (Result)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
    if (Result)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return 0;
}

/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
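/// For example, in "store i32 %v, i32* %p", %p is an address use but %v is
/// not; a load's pointer operand is always an address use.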
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::prefetch:
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      if (II->getArgOperand(0) == OperandVal)
        isAddress = true;
      break;
    }
  }
  return isAddress;
}

/// getAccessType - Return the type of the memory being accessed.
static Type *getAccessType(const Instruction *Inst) {
  Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getArgOperand(0)->getType();
      break;
    }
  }

  // All pointers have the same requirements, so canonicalize them to an
  // arbitrary pointer type to minimize variation.
  if (PointerType *PTy = dyn_cast<PointerType>(AccessTy))
    AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
                                PTy->getAddressSpace());

  return AccessTy;
}

/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
static bool
DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
  bool Changed = false;

  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val());

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
    Changed = true;
  }

  return Changed;
}

namespace {

/// Cost - This class is used to measure and compare candidate formulae.
class Cost {
  /// TODO: Some of these could be merged. Also, a lexical ordering
  /// isn't always optimal.
  unsigned NumRegs;
  unsigned AddRecCost;
  unsigned NumIVMuls;
  unsigned NumBaseAdds;
  unsigned ImmCost;
  unsigned SetupCost;

public:
  Cost()
    : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0),
      SetupCost(0) {}

  bool operator<(const Cost &Other) const;

  void Loose();

#ifndef NDEBUG
  // Once any of the metrics loses, they must all remain losers.
  bool isValid() {
    return ((NumRegs | AddRecCost | NumIVMuls | NumBaseAdds
             | ImmCost | SetupCost) != ~0u)
      || ((NumRegs & AddRecCost & NumIVMuls & NumBaseAdds
           & ImmCost & SetupCost) == ~0u);
  }
#endif

  bool isLoser() {
    assert(isValid() && "invalid cost");
    return NumRegs == ~0u;
  }

  void RateFormula(const Formula &F,
                   SmallPtrSet<const SCEV *, 16> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs,
                   const Loop *L,
                   const SmallVectorImpl<int64_t> &Offsets,
                   ScalarEvolution &SE, DominatorTree &DT);

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  void RateRegister(const SCEV *Reg,
                    SmallPtrSet<const SCEV *, 16> &Regs,
                    const Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);
  void RatePrimaryRegister(const SCEV *Reg,
                           SmallPtrSet<const SCEV *, 16> &Regs,
                           const Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT);
};

}

/// RateRegister - Tally up interesting quantities from the given register.
void Cost::RateRegister(const SCEV *Reg,
                        SmallPtrSet<const SCEV *, 16> &Regs,
                        const Loop *L,
                        ScalarEvolution &SE, DominatorTree &DT) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    if (AR->getLoop() == L)
      AddRecCost += 1; /// TODO: This should be a function of the stride.

    // If this is an addrec for another loop, don't second-guess its addrec phi
    // nodes. LSR isn't currently smart enough to reason about more than one
    // loop at a time. LSR has already run on inner loops, will not run on
    // other loops, and cannot be expected to change sibling loops. If the
    // AddRec exists, consider its register free and leave it alone. Otherwise,
    // do not consider this formula at all.
    // FIXME: why do we need to generate such formulae?
    else if (!EnableNested || L->contains(AR->getLoop()) ||
             (!AR->getLoop()->contains(L) &&
              DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))) {
      for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin();
           PHINode *PN = dyn_cast<PHINode>(I); ++I) {
        if (SE.isSCEVable(PN->getType()) &&
            (SE.getEffectiveSCEVType(PN->getType()) ==
             SE.getEffectiveSCEVType(AR->getType())) &&
            SE.getSCEV(PN) == AR)
          return;
      }
      if (!EnableNested) {
        Loose();
        return;
      }
      // If this isn't one of the addrecs that the loop already has, it
      // would require a costly new phi and add. TODO: This isn't
      // precisely modeled right now.
      ++NumBaseAdds;
      if (!Regs.count(AR->getStart())) {
        RateRegister(AR->getStart(), Regs, L, SE, DT);
        if (isLoser())
          return;
      }
    }

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1))) {
      if (!Regs.count(AR->getOperand(1))) {
        RateRegister(AR->getOperand(1), Regs, L, SE, DT);
        if (isLoser())
          return;
      }
    }
  }
  ++NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
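  // For example (illustrative), an existing value (SCEVUnknown) or a constant
  // needs no setup, whereas a sum such as (%a + %b) would need an add
  // computed in the preheader.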
  if (!isa<SCEVUnknown>(Reg) &&
      !isa<SCEVConstant>(Reg) &&
      !(isa<SCEVAddRecExpr>(Reg) &&
        (isa<SCEVUnknown>(cast<SCEVAddRecExpr>(Reg)->getStart()) ||
         isa<SCEVConstant>(cast<SCEVAddRecExpr>(Reg)->getStart()))))
    ++SetupCost;

  NumIVMuls += isa<SCEVMulExpr>(Reg) &&
               SE.hasComputableLoopEvolution(Reg, L);
}

/// RatePrimaryRegister - Record this register in the set. If we haven't seen
/// it before, rate it.
void Cost::RatePrimaryRegister(const SCEV *Reg,
                               SmallPtrSet<const SCEV *, 16> &Regs,
                               const Loop *L,
                               ScalarEvolution &SE, DominatorTree &DT) {
  if (Regs.insert(Reg))
    RateRegister(Reg, Regs, L, SE, DT);
}

void Cost::RateFormula(const Formula &F,
                       SmallPtrSet<const SCEV *, 16> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const Loop *L,
                       const SmallVectorImpl<int64_t> &Offsets,
                       ScalarEvolution &SE, DominatorTree &DT) {
  // Tally up the registers.
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(ScaledReg, Regs, L, SE, DT);
    if (isLoser())
      return;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (VisitedRegs.count(BaseReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(BaseReg, Regs, L, SE, DT);
    if (isLoser())
      return;
  }

  // Determine how many (unfolded) adds we'll need inside the loop.
  size_t NumBaseParts = F.BaseRegs.size() + (F.UnfoldedOffset != 0);
  if (NumBaseParts > 1)
    NumBaseAdds += NumBaseParts - 1;

  // Tally up the non-zero immediates.
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    int64_t Offset = (uint64_t)*I + F.AM.BaseOffs;
    if (F.AM.BaseGV)
      ImmCost += 64; // Handle symbolic values conservatively.
                     // TODO: This should probably be the pointer size.
    else if (Offset != 0)
      ImmCost += APInt(64, Offset, true).getMinSignedBits();
  }
  assert(isValid() && "invalid cost");
}

/// Loose - Set this cost to a losing value.
void Cost::Loose() {
  NumRegs = ~0u;
  AddRecCost = ~0u;
  NumIVMuls = ~0u;
  NumBaseAdds = ~0u;
  ImmCost = ~0u;
  SetupCost = ~0u;
}

/// operator< - Choose the lower cost.
bool Cost::operator<(const Cost &Other) const {
  if (NumRegs != Other.NumRegs)
    return NumRegs < Other.NumRegs;
  if (AddRecCost != Other.AddRecCost)
    return AddRecCost < Other.AddRecCost;
  if (NumIVMuls != Other.NumIVMuls)
    return NumIVMuls < Other.NumIVMuls;
  if (NumBaseAdds != Other.NumBaseAdds)
    return NumBaseAdds < Other.NumBaseAdds;
  if (ImmCost != Other.ImmCost)
    return ImmCost < Other.ImmCost;
  if (SetupCost != Other.SetupCost)
    return SetupCost < Other.SetupCost;
  return false;
}

void Cost::print(raw_ostream &OS) const {
  OS << NumRegs << " reg" << (NumRegs == 1 ? "" : "s");
  if (AddRecCost != 0)
    OS << ", with addrec cost " << AddRecCost;
  if (NumIVMuls != 0)
    OS << ", plus " << NumIVMuls << " IV mul" << (NumIVMuls == 1 ? "" : "s");
  if (NumBaseAdds != 0)
    OS << ", plus " << NumBaseAdds << " base add"
       << (NumBaseAdds == 1 ? "" : "s");
"" : "s"); 879 if (ImmCost != 0) 880 OS << ", plus " << ImmCost << " imm cost"; 881 if (SetupCost != 0) 882 OS << ", plus " << SetupCost << " setup cost"; 883 } 884 885 void Cost::dump() const { 886 print(errs()); errs() << '\n'; 887 } 888 889 namespace { 890 891 /// LSRFixup - An operand value in an instruction which is to be replaced 892 /// with some equivalent, possibly strength-reduced, replacement. 893 struct LSRFixup { 894 /// UserInst - The instruction which will be updated. 895 Instruction *UserInst; 896 897 /// OperandValToReplace - The operand of the instruction which will 898 /// be replaced. The operand may be used more than once; every instance 899 /// will be replaced. 900 Value *OperandValToReplace; 901 902 /// PostIncLoops - If this user is to use the post-incremented value of an 903 /// induction variable, this variable is non-null and holds the loop 904 /// associated with the induction variable. 905 PostIncLoopSet PostIncLoops; 906 907 /// LUIdx - The index of the LSRUse describing the expression which 908 /// this fixup needs, minus an offset (below). 909 size_t LUIdx; 910 911 /// Offset - A constant offset to be added to the LSRUse expression. 912 /// This allows multiple fixups to share the same LSRUse with different 913 /// offsets, for example in an unrolled loop. 914 int64_t Offset; 915 916 bool isUseFullyOutsideLoop(const Loop *L) const; 917 918 LSRFixup(); 919 920 void print(raw_ostream &OS) const; 921 void dump() const; 922 }; 923 924 } 925 926 LSRFixup::LSRFixup() 927 : UserInst(0), OperandValToReplace(0), LUIdx(~size_t(0)), Offset(0) {} 928 929 /// isUseFullyOutsideLoop - Test whether this fixup always uses its 930 /// value outside of the given loop. 931 bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const { 932 // PHI nodes use their value in their incoming blocks. 933 if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) { 934 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 935 if (PN->getIncomingValue(i) == OperandValToReplace && 936 L->contains(PN->getIncomingBlock(i))) 937 return false; 938 return true; 939 } 940 941 return !L->contains(UserInst); 942 } 943 944 void LSRFixup::print(raw_ostream &OS) const { 945 OS << "UserInst="; 946 // Store is common and interesting enough to be worth special-casing. 947 if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) { 948 OS << "store "; 949 WriteAsOperand(OS, Store->getOperand(0), /*PrintType=*/false); 950 } else if (UserInst->getType()->isVoidTy()) 951 OS << UserInst->getOpcodeName(); 952 else 953 WriteAsOperand(OS, UserInst, /*PrintType=*/false); 954 955 OS << ", OperandValToReplace="; 956 WriteAsOperand(OS, OperandValToReplace, /*PrintType=*/false); 957 958 for (PostIncLoopSet::const_iterator I = PostIncLoops.begin(), 959 E = PostIncLoops.end(); I != E; ++I) { 960 OS << ", PostIncLoop="; 961 WriteAsOperand(OS, (*I)->getHeader(), /*PrintType=*/false); 962 } 963 964 if (LUIdx != ~size_t(0)) 965 OS << ", LUIdx=" << LUIdx; 966 967 if (Offset != 0) 968 OS << ", Offset=" << Offset; 969 } 970 971 void LSRFixup::dump() const { 972 print(errs()); errs() << '\n'; 973 } 974 975 namespace { 976 977 /// UniquifierDenseMapInfo - A DenseMapInfo implementation for holding 978 /// DenseMaps and DenseSets of sorted SmallVectors of const SCEV*. 
struct UniquifierDenseMapInfo {
  static SmallVector<const SCEV *, 2> getEmptyKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-1));
    return V;
  }

  static SmallVector<const SCEV *, 2> getTombstoneKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-2));
    return V;
  }

  static unsigned getHashValue(const SmallVector<const SCEV *, 2> &V) {
    unsigned Result = 0;
    for (SmallVectorImpl<const SCEV *>::const_iterator I = V.begin(),
         E = V.end(); I != E; ++I)
      Result ^= DenseMapInfo<const SCEV *>::getHashValue(*I);
    return Result;
  }

  static bool isEqual(const SmallVector<const SCEV *, 2> &LHS,
                      const SmallVector<const SCEV *, 2> &RHS) {
    return LHS == RHS;
  }
};

/// LSRUse - This class holds the state that LSR keeps for each use in
/// IVUsers, as well as uses invented by LSR itself. It includes information
/// about what kinds of things can be folded into the user, information about
/// the user itself, and information about how the use may be satisfied.
/// TODO: Represent multiple users of the same expression in common?
class LSRUse {
  DenseSet<SmallVector<const SCEV *, 2>, UniquifierDenseMapInfo> Uniquifier;

public:
  /// KindType - An enum for a kind of use, indicating what types of
  /// scaled and immediate operands it might support.
  enum KindType {
    Basic,   ///< A normal use, with no folding.
    Special, ///< A special case of basic, allowing -1 scales.
    Address, ///< An address use; folding according to TargetLowering.
    ICmpZero ///< An equality icmp with both operands folded into one.
    // TODO: Add a generic icmp too?
  };

  KindType Kind;
  Type *AccessTy;

  SmallVector<int64_t, 8> Offsets;
  int64_t MinOffset;
  int64_t MaxOffset;

  /// AllFixupsOutsideLoop - This records whether all of the fixups using this
  /// LSRUse are outside of the loop, in which case some special-case
  /// heuristics may be used.
  bool AllFixupsOutsideLoop;

  /// WidestFixupType - This records the widest use type for any fixup using
  /// this LSRUse. FindUseWithSimilarFormula can't consider uses with different
  /// max fixup widths to be equivalent, because the narrower one may be
  /// relying on the implicit truncation to truncate away bogus bits.
  Type *WidestFixupType;

  /// Formulae - A list of ways to build a value that can satisfy this user.
  /// After the list is populated, one of these is selected heuristically and
  /// used to formulate a replacement for OperandValToReplace in UserInst.
  SmallVector<Formula, 12> Formulae;

  /// Regs - The set of register candidates used by all formulae in this
  /// LSRUse.
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, Type *T) : Kind(K), AccessTy(T),
                                MinOffset(INT64_MAX),
                                MaxOffset(INT64_MIN),
                                AllFixupsOutsideLoop(true),
                                WidestFixupType(0) {}

  bool HasFormulaWithSameRegs(const Formula &F) const;
  bool InsertFormula(const Formula &F);
  void DeleteFormula(Formula &F);
  void RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses);

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// HasFormulaWithSameRegs - Test whether this use has a formula which has
/// the same registers as the given formula.
bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());
  return Uniquifier.count(Key);
}

/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRUse::InsertFormula(const Formula &F) {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());

  if (!Uniquifier.insert(Key).second)
    return false;

  // Using a register to hold the value of 0 is not profitable.
  assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
         "Zero allocated in a scaled register!");
#ifndef NDEBUG
  for (SmallVectorImpl<const SCEV *>::const_iterator I =
       F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I)
    assert(!(*I)->isZero() && "Zero allocated in a base register!");
#endif

  // Add the formula to the list.
  Formulae.push_back(F);

  // Record registers now being used by this use.
  if (F.ScaledReg) Regs.insert(F.ScaledReg);
  Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());

  return true;
}

/// DeleteFormula - Remove the given formula from this use's list.
void LSRUse::DeleteFormula(Formula &F) {
  if (&F != &Formulae.back())
    std::swap(F, Formulae.back());
  Formulae.pop_back();
  assert(!Formulae.empty() && "LSRUse has no formulae left!");
}

/// RecomputeRegs - Recompute the Regs field, and update RegUses.
void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
  // Now that we've filtered out some formulae, recompute the Regs set.
  SmallPtrSet<const SCEV *, 4> OldRegs = Regs;
  Regs.clear();
  for (SmallVectorImpl<Formula>::const_iterator I = Formulae.begin(),
       E = Formulae.end(); I != E; ++I) {
    const Formula &F = *I;
    if (F.ScaledReg) Regs.insert(F.ScaledReg);
    Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
  }

  // Update the RegTracker.
  for (SmallPtrSet<const SCEV *, 4>::iterator I = OldRegs.begin(),
       E = OldRegs.end(); I != E; ++I)
    if (!Regs.count(*I))
      RegUses.DropRegister(*I, LUIdx);
}

void LSRUse::print(raw_ostream &OS) const {
  OS << "LSR Use: Kind=";
  switch (Kind) {
  case Basic:    OS << "Basic"; break;
  case Special:  OS << "Special"; break;
  case ICmpZero: OS << "ICmpZero"; break;
  case Address:
    OS << "Address of ";
    if (AccessTy->isPointerTy())
      OS << "pointer"; // the full pointer type could be really verbose
    else
      OS << *AccessTy;
  }

  OS << ", Offsets={";
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    OS << *I;
    if (llvm::next(I) != E)
      OS << ',';
  }
  OS << '}';

  if (AllFixupsOutsideLoop)
    OS << ", all-fixups-outside-loop";

  if (WidestFixupType)
    OS << ", widest fixup type: " << *WidestFixupType;
}

void LSRUse::dump() const {
  print(errs()); errs() << '\n';
}

/// isLegalUse - Test whether the use described by AM is "legal", meaning it
/// can be completely folded into the user instruction at isel time. This
/// includes address-mode folding and special icmp tricks.
static bool isLegalUse(const TargetLowering::AddrMode &AM,
                       LSRUse::KindType Kind, Type *AccessTy,
                       const TargetLowering *TLI) {
  switch (Kind) {
  case LSRUse::Address:
    // If we have low-level target information, ask the target if it can
    // completely fold this address.
    if (TLI) return TLI->isLegalAddressingMode(AM, AccessTy);

    // Otherwise, just guess that reg+reg addressing is legal.
    return !AM.BaseGV && AM.BaseOffs == 0 && AM.Scale <= 1;

  case LSRUse::ICmpZero:
    // There's not even a target hook for querying whether it would be legal to
    // fold a GV into an ICmp.
    if (AM.BaseGV)
      return false;

    // ICmp only has two operands; don't allow more than two non-trivial parts.
    if (AM.Scale != 0 && AM.HasBaseReg && AM.BaseOffs != 0)
      return false;

    // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale
    // by putting the scaled register in the other operand of the icmp.
    if (AM.Scale != 0 && AM.Scale != -1)
      return false;

    // If we have low-level target information, ask the target if it can fold
    // an integer immediate on an icmp.
    if (AM.BaseOffs != 0) {
      if (TLI) return TLI->isLegalICmpImmediate(-AM.BaseOffs);
      return false;
    }

    return true;

  case LSRUse::Basic:
    // Only handle single-register values.
    return !AM.BaseGV && AM.Scale == 0 && AM.BaseOffs == 0;

  case LSRUse::Special:
    // Only handle -1 scales, or no scale.
    return AM.Scale == 0 || AM.Scale == -1;
  }

  return false;
}

static bool isLegalUse(TargetLowering::AddrMode AM,
                       int64_t MinOffset, int64_t MaxOffset,
                       LSRUse::KindType Kind, Type *AccessTy,
                       const TargetLowering *TLI) {
  // Check for overflow.
  if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) !=
      (MinOffset > 0))
    return false;
  AM.BaseOffs = (uint64_t)AM.BaseOffs + MinOffset;
  if (isLegalUse(AM, Kind, AccessTy, TLI)) {
    AM.BaseOffs = (uint64_t)AM.BaseOffs - MinOffset;
    // Check for overflow.
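    // (Absent wraparound, the sum exceeds BaseOffs exactly when the added
    // offset is positive; a mismatch between the two tests therefore means
    // the signed addition overflowed.)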
    if (((int64_t)((uint64_t)AM.BaseOffs + MaxOffset) > AM.BaseOffs) !=
        (MaxOffset > 0))
      return false;
    AM.BaseOffs = (uint64_t)AM.BaseOffs + MaxOffset;
    return isLegalUse(AM, Kind, AccessTy, TLI);
  }
  return false;
}

static bool isAlwaysFoldable(int64_t BaseOffs,
                             GlobalValue *BaseGV,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, Type *AccessTy,
                             const TargetLowering *TLI) {
  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  // Canonicalize a scale of 1 to a base register if the formula doesn't
  // already have a base register.
  if (!AM.HasBaseReg && AM.Scale == 1) {
    AM.Scale = 0;
    AM.HasBaseReg = true;
  }

  return isLegalUse(AM, Kind, AccessTy, TLI);
}

static bool isAlwaysFoldable(const SCEV *S,
                             int64_t MinOffset, int64_t MaxOffset,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, Type *AccessTy,
                             const TargetLowering *TLI,
                             ScalarEvolution &SE) {
  // Fast-path: zero is always foldable.
  if (S->isZero()) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t BaseOffs = ExtractImmediate(S, SE);
  GlobalValue *BaseGV = ExtractSymbol(S, SE);

  // If there's anything else involved, it's not foldable.
  if (!S->isZero()) return false;

  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI);
}

namespace {

/// UseMapDenseMapInfo - A DenseMapInfo implementation for holding
/// DenseMaps and DenseSets of pairs of const SCEV* and LSRUse::Kind.
struct UseMapDenseMapInfo {
  static std::pair<const SCEV *, LSRUse::KindType> getEmptyKey() {
    return std::make_pair(reinterpret_cast<const SCEV *>(-1), LSRUse::Basic);
  }

  static std::pair<const SCEV *, LSRUse::KindType> getTombstoneKey() {
    return std::make_pair(reinterpret_cast<const SCEV *>(-2), LSRUse::Basic);
  }

  static unsigned
  getHashValue(const std::pair<const SCEV *, LSRUse::KindType> &V) {
    unsigned Result = DenseMapInfo<const SCEV *>::getHashValue(V.first);
    Result ^= DenseMapInfo<unsigned>::getHashValue(unsigned(V.second));
    return Result;
  }

  static bool isEqual(const std::pair<const SCEV *, LSRUse::KindType> &LHS,
                      const std::pair<const SCEV *, LSRUse::KindType> &RHS) {
    return LHS == RHS;
  }
};

/// LSRInstance - This class holds state for the main loop strength reduction
/// logic.
class LSRInstance {
  IVUsers &IU;
  ScalarEvolution &SE;
  DominatorTree &DT;
  LoopInfo &LI;
  const TargetLowering *const TLI;
  Loop *const L;
  bool Changed;

  /// IVIncInsertPos - This is the insert position at which the current loop's
  /// induction variable increment should be placed.
  /// In simple loops, this is the latch block's terminator. But in more
  /// complicated cases, this is a position which will dominate all the
  /// in-loop post-increment users.
  Instruction *IVIncInsertPos;

  /// Factors - Interesting factors between use strides.
  SmallSetVector<int64_t, 8> Factors;

  /// Types - Interesting use types, to facilitate truncation reuse.
  SmallSetVector<Type *, 4> Types;

  /// Fixups - The list of operands which are to be replaced.
  SmallVector<LSRFixup, 16> Fixups;

  /// Uses - The list of interesting uses.
  SmallVector<LSRUse, 16> Uses;

  /// RegUses - Track which uses use which register candidates.
  RegUseTracker RegUses;

  void OptimizeShadowIV();
  bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
  ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
  void OptimizeLoopTermCond();

  void CollectInterestingTypesAndFactors();
  void CollectFixupsAndInitialFormulae();

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());
    return Fixups.back();
  }

  // Support for sharing of LSRUses between LSRFixups.
  typedef DenseMap<std::pair<const SCEV *, LSRUse::KindType>,
                   size_t,
                   UseMapDenseMapInfo> UseMapTy;
  UseMapTy UseMap;

  bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
                          LSRUse::KindType Kind, Type *AccessTy);

  std::pair<size_t, int64_t> getUse(const SCEV *&Expr,
                                    LSRUse::KindType Kind,
                                    Type *AccessTy);

  void DeleteUse(LSRUse &LU, size_t LUIdx);

  LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);

public:
  void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void CountRegisters(const Formula &F, size_t LUIdx);
  bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);

  void CollectLoopInvariantFixupsAndFormulae();

  void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
                              unsigned Depth = 0);
  void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateCrossUseConstantOffsets();
  void GenerateAllReuseFormulae();

  void FilterOutUndesirableDedicatedRegisters();

  size_t EstimateSearchSpaceComplexity() const;
  void NarrowSearchSpaceByDetectingSupersets();
  void NarrowSearchSpaceByCollapsingUnrolledCode();
  void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  void NarrowSearchSpaceByPickingWinnerRegs();
  void NarrowSearchSpaceUsingHeuristics();

  void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                    Cost &SolutionCost,
                    SmallVectorImpl<const Formula *> &Workspace,
                    const Cost &CurCost,
                    const SmallPtrSet<const SCEV *, 16> &CurRegs,
                    DenseSet<const SCEV *> &VisitedRegs) const;
  void Solve(SmallVectorImpl<const Formula *> &Solution) const;

  BasicBlock::iterator
    HoistInsertPosition(BasicBlock::iterator IP,
                        const SmallVectorImpl<Instruction *> &Inputs) const;
  BasicBlock::iterator
    AdjustInsertPositionForExpand(BasicBlock::iterator IP,
                                  const LSRFixup &LF,
                                  const LSRUse &LU) const;

  Value *Expand(const LSRFixup &LF,
                const Formula &F,
                BasicBlock::iterator IP,
                SCEVExpander &Rewriter,
                SmallVectorImpl<WeakVH> &DeadInsts) const;
  void RewriteForPHI(PHINode *PN, const LSRFixup &LF,
                     const Formula &F,
                     SCEVExpander &Rewriter,
                     SmallVectorImpl<WeakVH> &DeadInsts,
                     Pass *P) const;
  void Rewrite(const LSRFixup &LF,
               const Formula &F,
               SCEVExpander &Rewriter,
               SmallVectorImpl<WeakVH> &DeadInsts,
               Pass *P) const;
  void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                         Pass *P);

  LSRInstance(const TargetLowering *tli, Loop *l, Pass *P);

  bool getChanged() const { return Changed; }

  void print_factors_and_types(raw_ostream &OS) const;
  void print_fixups(raw_ostream &OS) const;
  void print_uses(raw_ostream &OS) const;
  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// OptimizeShadowIV - If IV is used in an int-to-float cast
/// inside the loop then try to eliminate the cast operation.
void LSRInstance::OptimizeShadowIV() {
  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;

  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
       UI != E; /* empty */) {
    IVUsers::const_iterator CandidateUI = UI;
    ++UI;
    Instruction *ShadowUse = CandidateUI->getUser();
    Type *DestTy = NULL;
    bool IsSigned = false;

    /* If shadow use is an int->float cast then insert a second IV
       to eliminate this cast.

         for (unsigned i = 0; i < n; ++i)
           foo((double)i);

       is transformed into

         double d = 0.0;
         for (unsigned i = 0; i < n; ++i, ++d)
           foo(d);
    */
    if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser())) {
      IsSigned = false;
      DestTy = UCast->getDestTy();
    }
    else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser())) {
      IsSigned = true;
      DestTy = SCast->getDestTy();
    }
    if (!DestTy) continue;

    if (TLI) {
      // If target does not support DestTy natively then do not apply
      // this transformation.
      EVT DVT = TLI->getValueType(DestTy);
      if (!TLI->isTypeLegal(DVT)) continue;
    }

    PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
    if (!PH) continue;
    if (PH->getNumIncomingValues() != 2) continue;

    Type *SrcTy = PH->getType();
    int Mantissa = DestTy->getFPMantissaWidth();
    if (Mantissa == -1) continue;
    if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
      continue;

    unsigned Entry, Latch;
    if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
      Entry = 0;
      Latch = 1;
    } else {
      Entry = 1;
      Latch = 0;
    }

    ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
    if (!Init) continue;
    Constant *NewInit = ConstantFP::get(DestTy, IsSigned ?
                                        (double)Init->getSExtValue() :
                                        (double)Init->getZExtValue());

    BinaryOperator *Incr =
      dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
    if (!Incr) continue;
    if (Incr->getOpcode() != Instruction::Add
        && Incr->getOpcode() != Instruction::Sub)
      continue;

    /* Initialize new IV, double d = 0.0 in above example. */
    ConstantInt *C = NULL;
    if (Incr->getOperand(0) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(1));
    else if (Incr->getOperand(1) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(0));
    else
      continue;

    if (!C) continue;

    // Ignore negative constants, as the code below doesn't handle them
    // correctly. TODO: Remove this restriction.
    if (!C->getValue().isStrictlyPositive()) continue;

    /* Add new PHINode. */
    PHINode *NewPH = PHINode::Create(DestTy, 2, "IV.S.", PH);

    /* create new increment. '++d' in above example. */
    Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
    BinaryOperator *NewIncr =
      BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
                               Instruction::FAdd : Instruction::FSub,
                             NewPH, CFP, "IV.S.next.", Incr);

    NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
    NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));

    /* Remove cast operation */
    ShadowUse->replaceAllUsesWith(NewPH);
    ShadowUse->eraseFromParent();
    Changed = true;
    break;
  }
}

/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
/// set the IV user and stride information and return true, otherwise return
/// false.
bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) {
  for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
    if (UI->getUser() == Cond) {
      // NOTE: we could handle setcc instructions with multiple uses here, but
      // InstCombine does it as well for simple uses; it's not clear that it
      // occurs enough in real life to handle.
      CondUse = UI;
      return true;
    }
  return false;
}

/// OptimizeMax - Rewrite the loop's terminating condition if it uses
/// a max computation.
///
/// This is a narrow solution to a specific, but acute, problem. For loops
/// like this:
///
///   i = 0;
///   do {
///     p[i] = 0.0;
///   } while (++i < n);
///
/// the trip count isn't just 'n', because 'n' might not be positive. And
/// unfortunately this can come up even for loops where the user didn't use
/// a C do-while loop. For example, seemingly well-behaved top-test loops
/// will commonly be lowered like this:
///
///   if (n > 0) {
///     i = 0;
///     do {
///       p[i] = 0.0;
///     } while (++i < n);
///   }
///
/// and then it's possible for subsequent optimization to obscure the if
/// test in such a way that indvars can't find it.
///
/// When indvars can't find the if test in loops like this, it creates a
/// max expression, which allows it to give the loop a canonical
/// induction variable:
///
///   i = 0;
///   max = n < 1 ? 1 : n;
///   do {
///     p[i] = 0.0;
///   } while (++i != max);
///
/// Canonical induction variables are necessary because the loop passes
/// are designed around them. The most obvious example of this is the
/// LoopInfo analysis, which doesn't remember trip count values. It
/// expects to be able to rediscover the trip count each time it is
/// needed, and it does this using a simple analysis that only succeeds if
/// the loop has a canonical induction variable.
///
/// However, when it comes time to generate code, the maximum operation
/// can be quite costly, especially if it's inside of an outer loop.
///
/// This function solves this problem by detecting this type of loop and
/// rewriting its condition from ICMP_NE back to ICMP_SLT, and deleting
/// the instructions for the maximum computation.
///
ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
  // Check that the loop matches the pattern we're looking for.
  if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
      Cond->getPredicate() != CmpInst::ICMP_NE)
    return Cond;

  SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
  if (!Sel || !Sel->hasOneUse()) return Cond;

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return Cond;
  const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1);

  // Add one to the backedge-taken count to get the trip count.
  const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount);
  if (IterationCount != SE.getSCEV(Sel)) return Cond;

  // Check for a max calculation that matches the pattern. There's no check
  // for ICMP_ULE here because the comparison would be with zero, which
  // isn't interesting.
  CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
  const SCEVNAryExpr *Max = 0;
  if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) {
    Pred = ICmpInst::ICMP_SLE;
    Max = S;
  } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) {
    Pred = ICmpInst::ICMP_SLT;
    Max = S;
  } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) {
    Pred = ICmpInst::ICMP_ULT;
    Max = U;
  } else {
    // No match; bail.
    return Cond;
  }

  // To handle a max with more than two operands, this optimization would
  // require additional checking and setup.
  if (Max->getNumOperands() != 2)
    return Cond;

  const SCEV *MaxLHS = Max->getOperand(0);
  const SCEV *MaxRHS = Max->getOperand(1);

  // ScalarEvolution canonicalizes constants to the left. For < and >, look
  // for a comparison with 1. For <= and >=, a comparison with zero.
  if (!MaxLHS ||
      (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One)))
    return Cond;

  // Check the relevant induction variable for conformance to
  // the pattern.
  const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
  if (!AR || !AR->isAffine() ||
      AR->getStart() != One ||
      AR->getStepRecurrence(SE) != One)
    return Cond;

  assert(AR->getLoop() == L &&
         "Loop condition operand is an addrec in a different loop!");

  // Check the right operand of the select, and remember it, as it will
  // be used in the new comparison instruction.
  Value *NewRHS = 0;
  if (ICmpInst::isTrueWhenEqual(Pred)) {
    // Look for n+1, and grab n.
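    // Illustrative example: when the backedge-taken count is smax(0, n), the
    // select computing the trip count has arms 1 and n+1, so one select arm
    // has the form n+1; recover n from whichever arm matches.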
    if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1)))
      if (isa<ConstantInt>(BO->getOperand(1)) &&
          cast<ConstantInt>(BO->getOperand(1))->isOne() &&
          SE.getSCEV(BO->getOperand(0)) == MaxRHS)
        NewRHS = BO->getOperand(0);
    if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2)))
      if (isa<ConstantInt>(BO->getOperand(1)) &&
          cast<ConstantInt>(BO->getOperand(1))->isOne() &&
          SE.getSCEV(BO->getOperand(0)) == MaxRHS)
        NewRHS = BO->getOperand(0);
    if (!NewRHS)
      return Cond;
  } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
    NewRHS = Sel->getOperand(1);
  else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
    NewRHS = Sel->getOperand(2);
  else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS))
    NewRHS = SU->getValue();
  else
    // Max doesn't match expected pattern.
    return Cond;

  // Determine the new comparison opcode. It may be signed or unsigned,
  // and the original comparison may be either equality or inequality.
  if (Cond->getPredicate() == CmpInst::ICMP_EQ)
    Pred = CmpInst::getInversePredicate(Pred);

  // OK, everything checks out: change the condition into an SLT or SGE and
  // delete the max calculation.
  ICmpInst *NewCond =
    new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");

  // Delete the max calculation instructions.
  Cond->replaceAllUsesWith(NewCond);
  CondUse->setUser(NewCond);
  Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
  Cond->eraseFromParent();
  Sel->eraseFromParent();
  if (Cmp->use_empty())
    Cmp->eraseFromParent();
  return NewCond;
}

/// OptimizeLoopTermCond - Change loop terminating condition to use the
/// postinc iv when possible.
void
LSRInstance::OptimizeLoopTermCond() {
  SmallPtrSet<Instruction *, 4> PostIncs;

  BasicBlock *LatchBlock = L->getLoopLatch();
  SmallVector<BasicBlock*, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitingBlock = ExitingBlocks[i];

    // Get the terminating condition for the loop if possible. If we
    // can, we want to change it to use a post-incremented version of its
    // induction variable, to allow coalescing the live ranges for the IV into
    // one register value.

    BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
    if (!TermBr)
      continue;
    // FIXME: Overly conservative; the termination condition could be an 'or'
    // etc.
    if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
      continue;

    // Search IVUsesByStride to find Cond's IVUse if there is one.
    IVStrideUse *CondUse = 0;
    ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
    if (!FindIVUserForCond(Cond, CondUse))
      continue;

    // If the trip count is computed in terms of a max (due to ScalarEvolution
    // being unable to find a sufficient guard, for example), change the loop
    // comparison to use SLT or ULT instead of NE.
    // One consequence of doing this now is that it disrupts the count-down
    // optimization. That's not always a bad thing though, because in such
    // cases it may still be worthwhile to avoid a max.
    Cond = OptimizeMax(Cond, CondUse);

    // If this exiting block dominates the latch block, it may also use
    // the post-inc value if it won't be shared with other uses.
    // Check for dominance.
    if (!DT.dominates(ExitingBlock, LatchBlock))
      continue;

    // Conservatively avoid trying to use the post-inc value in non-latch
    // exits if there may be pre-inc users in intervening blocks.
    if (LatchBlock != ExitingBlock)
      for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
        // Test if the use is reachable from the exiting block. This dominator
        // query is a conservative approximation of reachability.
        if (&*UI != CondUse &&
            !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) {
          // Conservatively assume there may be reuse if the quotient of their
          // strides could be a legal scale.
          const SCEV *A = IU.getStride(*CondUse, L);
          const SCEV *B = IU.getStride(*UI, L);
          if (!A || !B) continue;
          if (SE.getTypeSizeInBits(A->getType()) !=
              SE.getTypeSizeInBits(B->getType())) {
            if (SE.getTypeSizeInBits(A->getType()) >
                SE.getTypeSizeInBits(B->getType()))
              B = SE.getSignExtendExpr(B, A->getType());
            else
              A = SE.getSignExtendExpr(A, B->getType());
          }
          if (const SCEVConstant *D =
                dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) {
            const ConstantInt *C = D->getValue();
            // Stride of one or negative one can have reuse with non-addresses.
            if (C->isOne() || C->isAllOnesValue())
              goto decline_post_inc;
            // Avoid weird situations.
            if (C->getValue().getMinSignedBits() >= 64 ||
                C->getValue().isMinSignedValue())
              goto decline_post_inc;
            // Without TLI, assume that any stride might be valid, and so any
            // use might be shared.
            if (!TLI)
              goto decline_post_inc;
            // Check for possible scaled-address reuse.
            Type *AccessTy = getAccessType(UI->getUser());
            TargetLowering::AddrMode AM;
            AM.Scale = C->getSExtValue();
            if (TLI->isLegalAddressingMode(AM, AccessTy))
              goto decline_post_inc;
            AM.Scale = -AM.Scale;
            if (TLI->isLegalAddressingMode(AM, AccessTy))
              goto decline_post_inc;
          }
        }

    DEBUG(dbgs() << "  Change loop exiting icmp to use postinc iv: "
                 << *Cond << '\n');

    // It's possible for the setcc instruction to be anywhere in the loop, and
    // possible for it to have multiple users. If it is not immediately before
    // the exiting block branch, move it.
    if (&*++BasicBlock::iterator(Cond) != TermBr) {
      if (Cond->hasOneUse()) {
        Cond->moveBefore(TermBr);
      } else {
        // Clone the terminating condition and insert it into the loop end.
        ICmpInst *OldCond = Cond;
        Cond = cast<ICmpInst>(Cond->clone());
        Cond->setName(L->getHeader()->getName() + ".termcond");
        ExitingBlock->getInstList().insert(TermBr, Cond);

        // Clone the IVUse, as the old use still exists!
        CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace());
        TermBr->replaceUsesOfWith(OldCond, Cond);
      }
    }

    // If we get to here, we know that we can transform the setcc instruction to
    // use the post-incremented version of the IV, allowing us to coalesce the
    // live ranges for the IV correctly.
    CondUse->transformToPostInc(L);
    Changed = true;

    PostIncs.insert(Cond);
  decline_post_inc:;
  }

  // Determine an insertion point for the loop induction variable increment. It
  // must dominate all the post-inc comparisons we just set up, and it must
  // dominate the loop latch edge.
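  // For example, if a post-inc comparison was set up in an exiting block that
  // dominates the latch, the insertion point is hoisted up to that comparison
  // so the incremented value is available to it.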
  IVIncInsertPos = L->getLoopLatch()->getTerminator();
  for (SmallPtrSet<Instruction *, 4>::const_iterator I = PostIncs.begin(),
       E = PostIncs.end(); I != E; ++I) {
    BasicBlock *BB =
      DT.findNearestCommonDominator(IVIncInsertPos->getParent(),
                                    (*I)->getParent());
    if (BB == (*I)->getParent())
      IVIncInsertPos = *I;
    else if (BB != IVIncInsertPos->getParent())
      IVIncInsertPos = BB->getTerminator();
  }
}

/// reconcileNewOffset - Determine if the given use can accommodate a fixup
/// at the given offset and other details. If so, update the use and
/// return true.
bool
LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
                                LSRUse::KindType Kind, Type *AccessTy) {
  int64_t NewMinOffset = LU.MinOffset;
  int64_t NewMaxOffset = LU.MaxOffset;
  Type *NewAccessTy = AccessTy;

  // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
  // something conservative; however, this can pessimize in the case that one
  // of the uses will have all its uses outside the loop, for example.
  if (LU.Kind != Kind)
    return false;
  // Conservatively assume HasBaseReg is true for now.
  if (NewOffset < LU.MinOffset) {
    if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, HasBaseReg,
                          Kind, AccessTy, TLI))
      return false;
    NewMinOffset = NewOffset;
  } else if (NewOffset > LU.MaxOffset) {
    if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, HasBaseReg,
                          Kind, AccessTy, TLI))
      return false;
    NewMaxOffset = NewOffset;
  }
  // Check for a mismatched access type, and fall back conservatively as
  // needed.
  // TODO: Be less conservative when the type is similar and can use the same
  // addressing modes.
  if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
    NewAccessTy = Type::getVoidTy(AccessTy->getContext());

  // Update the use.
  LU.MinOffset = NewMinOffset;
  LU.MaxOffset = NewMaxOffset;
  LU.AccessTy = NewAccessTy;
  if (NewOffset != LU.Offsets.back())
    LU.Offsets.push_back(NewOffset);
  return true;
}

/// getUse - Return an LSRUse index and an offset value for a fixup which
/// needs the given expression, with the given kind and optional access type.
/// Either reuse an existing use or create a new one, as needed.
std::pair<size_t, int64_t>
LSRInstance::getUse(const SCEV *&Expr,
                    LSRUse::KindType Kind, Type *AccessTy) {
  const SCEV *Copy = Expr;
  int64_t Offset = ExtractImmediate(Expr, SE);

  // Basic uses can't accept any offset, for example.
  if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true, Kind, AccessTy, TLI)) {
    Expr = Copy;
    Offset = 0;
  }

  std::pair<UseMapTy::iterator, bool> P =
    UseMap.insert(std::make_pair(std::make_pair(Expr, Kind), 0));
  if (!P.second) {
    // A use already existed with this base.
    size_t LUIdx = P.first->second;
    LSRUse &LU = Uses[LUIdx];
    if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy))
      // Reuse this use.
      return std::make_pair(LUIdx, Offset);
  }

  // Create a new use.
  size_t LUIdx = Uses.size();
  P.first->second = LUIdx;
  Uses.push_back(LSRUse(Kind, AccessTy));
  LSRUse &LU = Uses[LUIdx];

  // We don't need to track redundant offsets, but we don't need to go out
  // of our way here to avoid them.
  if (LU.Offsets.empty() || Offset != LU.Offsets.back())
    LU.Offsets.push_back(Offset);

  LU.MinOffset = Offset;
  LU.MaxOffset = Offset;
  return std::make_pair(LUIdx, Offset);
}

/// DeleteUse - Delete the given use from the Uses list.
void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) {
  if (&LU != &Uses.back())
    std::swap(LU, Uses.back());
  Uses.pop_back();

  // Update RegUses.
  RegUses.SwapAndDropUse(LUIdx, Uses.size());
}

/// FindUseWithSimilarFormula - Look for a use distinct from OrigLU which has
/// a formula that has the same registers as the given formula.
LSRUse *
LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
                                       const LSRUse &OrigLU) {
  // Search all uses for the formula. This could be more clever.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    // Check whether this use is close enough to OrigLU, to see whether it's
    // worthwhile looking through its formulae.
    // Ignore ICmpZero uses because they may contain formulae generated by
    // GenerateICmpZeroScales, in which case adding fixup offsets may
    // be invalid.
    if (&LU != &OrigLU &&
        LU.Kind != LSRUse::ICmpZero &&
        LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy &&
        LU.WidestFixupType == OrigLU.WidestFixupType &&
        LU.HasFormulaWithSameRegs(OrigF)) {
      // Scan through this use's formulae.
      for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
           E = LU.Formulae.end(); I != E; ++I) {
        const Formula &F = *I;
        // Check to see if this formula has the same registers and symbols
        // as OrigF.
        if (F.BaseRegs == OrigF.BaseRegs &&
            F.ScaledReg == OrigF.ScaledReg &&
            F.AM.BaseGV == OrigF.AM.BaseGV &&
            F.AM.Scale == OrigF.AM.Scale &&
            F.UnfoldedOffset == OrigF.UnfoldedOffset) {
          if (F.AM.BaseOffs == 0)
            return &LU;
          // This is the formula where all the registers and symbols matched;
          // there aren't going to be any others. Since we declined it, we
          // can skip the rest of the formulae and proceed to the next LSRUse.
          break;
        }
      }
    }
  }

  // Nothing looked good.
  return 0;
}

void LSRInstance::CollectInterestingTypesAndFactors() {
  SmallSetVector<const SCEV *, 4> Strides;

  // Collect interesting types and strides.
  SmallVector<const SCEV *, 4> Worklist;
  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
    const SCEV *Expr = IU.getExpr(*UI);

    // Collect interesting types.
    Types.insert(SE.getEffectiveSCEVType(Expr->getType()));

    // Add strides for mentioned loops.
    Worklist.push_back(Expr);
    do {
      const SCEV *S = Worklist.pop_back_val();
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
        Strides.insert(AR->getStepRecurrence(SE));
        Worklist.push_back(AR->getStart());
      } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
        Worklist.append(Add->op_begin(), Add->op_end());
      }
    } while (!Worklist.empty());
  }

  // Compute interesting factors from the set of interesting strides.
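  // For example, if the loop contains IVs with strides 4 and 8, the exact
  // quotient 2 is recorded as a factor, allowing formulae that reuse the
  // stride-4 register scaled by 2 in place of a separate stride-8 register.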
  for (SmallSetVector<const SCEV *, 4>::const_iterator
       I = Strides.begin(), E = Strides.end(); I != E; ++I)
    for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
         llvm::next(I); NewStrideIter != E; ++NewStrideIter) {
      const SCEV *OldStride = *I;
      const SCEV *NewStride = *NewStrideIter;

      if (SE.getTypeSizeInBits(OldStride->getType()) !=
          SE.getTypeSizeInBits(NewStride->getType())) {
        if (SE.getTypeSizeInBits(OldStride->getType()) >
            SE.getTypeSizeInBits(NewStride->getType()))
          NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType());
        else
          OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
      }
      if (const SCEVConstant *Factor =
            dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride,
                                                        SE, true))) {
        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
          Factors.insert(Factor->getValue()->getValue().getSExtValue());
      } else if (const SCEVConstant *Factor =
                   dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride,
                                                               NewStride,
                                                               SE, true))) {
        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
          Factors.insert(Factor->getValue()->getValue().getSExtValue());
      }
    }

  // If all uses use the same type, don't bother looking for truncation-based
  // reuse.
  if (Types.size() == 1)
    Types.clear();

  DEBUG(print_factors_and_types(dbgs()));
}

void LSRInstance::CollectFixupsAndInitialFormulae() {
  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
    // Record the uses.
    LSRFixup &LF = getNewFixup();
    LF.UserInst = UI->getUser();
    LF.OperandValToReplace = UI->getOperandValToReplace();
    LF.PostIncLoops = UI->getPostIncLoops();

    LSRUse::KindType Kind = LSRUse::Basic;
    Type *AccessTy = 0;
    if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) {
      Kind = LSRUse::Address;
      AccessTy = getAccessType(LF.UserInst);
    }

    const SCEV *S = IU.getExpr(*UI);

    // Equality (== and !=) ICmps are special. We can rewrite (i == N) as
    // (N - i == 0), and this allows (N - i) to be the expression that we work
    // with rather than just N or i, so we can consider the register
    // requirements for both N and i at the same time. Limiting this code to
    // equality icmps is not a problem because all interesting loops use
    // equality icmps, thanks to IndVarSimplify.
    if (ICmpInst *CI = dyn_cast<ICmpInst>(LF.UserInst))
      if (CI->isEquality()) {
        // Swap the operands if needed to put the OperandValToReplace on the
        // left, for consistency.
        Value *NV = CI->getOperand(1);
        if (NV == LF.OperandValToReplace) {
          CI->setOperand(1, CI->getOperand(0));
          CI->setOperand(0, NV);
          NV = CI->getOperand(1);
          Changed = true;
        }

        // x == y  -->  x - y == 0
        const SCEV *N = SE.getSCEV(NV);
        if (SE.isLoopInvariant(N, L)) {
          // S is normalized, so normalize N before folding it into S
          // to keep the result normalized.
          N = TransformForPostIncUse(Normalize, N, CI, 0,
                                     LF.PostIncLoops, SE, DT);
          Kind = LSRUse::ICmpZero;
          S = SE.getMinusSCEV(N, S);
        }

        // -1 and the negations of all interesting strides (except the negation
        // of -1) are now also interesting.
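        // For example, if Factors held {2, 4}, it becomes {2, 4, -2, -4, -1}:
        // rewriting (i == N) as (N - i == 0) negates i's stride, so the
        // negated factors become interesting too.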
        for (size_t i = 0, e = Factors.size(); i != e; ++i)
          if (Factors[i] != -1)
            Factors.insert(-(uint64_t)Factors[i]);
        Factors.insert(-1);
      }

    // Set up the initial formula for this use.
    std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy);
    LF.LUIdx = P.first;
    LF.Offset = P.second;
    LSRUse &LU = Uses[LF.LUIdx];
    LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
    if (!LU.WidestFixupType ||
        SE.getTypeSizeInBits(LU.WidestFixupType) <
        SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
      LU.WidestFixupType = LF.OperandValToReplace->getType();

    // If this is the first use of this LSRUse, give it a formula.
    if (LU.Formulae.empty()) {
      InsertInitialFormula(S, LU, LF.LUIdx);
      CountRegisters(LU.Formulae.back(), LF.LUIdx);
    }
  }

  DEBUG(print_fixups(dbgs()));
}

/// InsertInitialFormula - Insert a formula for the given expression into
/// the given use, separating out loop-variant portions from loop-invariant
/// and loop-computable portions.
void
LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) {
  Formula F;
  F.InitialMatch(S, L, SE);
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Initial formula already exists!"); (void)Inserted;
}

/// InsertSupplementalFormula - Insert a simple single-register formula for
/// the given expression into the given use.
void
LSRInstance::InsertSupplementalFormula(const SCEV *S,
                                       LSRUse &LU, size_t LUIdx) {
  Formula F;
  F.BaseRegs.push_back(S);
  F.AM.HasBaseReg = true;
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Supplemental formula already exists!"); (void)Inserted;
}

/// CountRegisters - Note which registers are used by the given formula,
/// updating RegUses.
void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) {
  if (F.ScaledReg)
    RegUses.CountRegister(F.ScaledReg, LUIdx);
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I)
    RegUses.CountRegister(*I, LUIdx);
}

/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
  if (!LU.InsertFormula(F))
    return false;

  CountRegisters(F, LUIdx);
  return true;
}

/// CollectLoopInvariantFixupsAndFormulae - Check for other uses of
/// loop-invariant values which we're tracking. These other uses will pin these
/// values in registers, making them less profitable for elimination.
/// TODO: This currently misses non-constant addrec step registers.
/// TODO: Should this give more weight to users inside the loop?
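///
/// For example (illustrative):
///
///   for (i = 0; i != n; ++i)
///     p[i] = 0;  // 'n' is used by the loop's trip test
///   use(n);      // this post-loop use keeps 'n' pinned in a register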
void
LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
  SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end());
  SmallPtrSet<const SCEV *, 8> Inserted;

  while (!Worklist.empty()) {
    const SCEV *S = Worklist.pop_back_val();

    if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
      Worklist.append(N->op_begin(), N->op_end());
    else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
      Worklist.push_back(C->getOperand());
    else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      Worklist.push_back(D->getLHS());
      Worklist.push_back(D->getRHS());
    } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
      if (!Inserted.insert(U)) continue;
      const Value *V = U->getValue();
      if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
        // Look for instructions defined outside the loop.
        if (L->contains(Inst)) continue;
      } else if (isa<UndefValue>(V))
        // Undef doesn't have a live range, so it doesn't matter.
        continue;
      for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
           UI != UE; ++UI) {
        const Instruction *UserInst = dyn_cast<Instruction>(*UI);
        // Ignore non-instructions.
        if (!UserInst)
          continue;
        // Ignore instructions in other functions (as can happen with
        // Constants).
        if (UserInst->getParent()->getParent() != L->getHeader()->getParent())
          continue;
        // Ignore instructions not dominated by the loop.
        const BasicBlock *UseBB = !isa<PHINode>(UserInst) ?
          UserInst->getParent() :
          cast<PHINode>(UserInst)->getIncomingBlock(
            PHINode::getIncomingValueNumForOperand(UI.getOperandNo()));
        if (!DT.dominates(L->getHeader(), UseBB))
          continue;
        // Ignore uses which are part of other SCEV expressions, to avoid
        // analyzing them multiple times.
        if (SE.isSCEVable(UserInst->getType())) {
          const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst));
          // If the user is a no-op, look through to its uses.
          if (!isa<SCEVUnknown>(UserS))
            continue;
          if (UserS == U) {
            Worklist.push_back(
              SE.getUnknown(const_cast<Instruction *>(UserInst)));
            continue;
          }
        }
        // Ignore icmp instructions which are already being analyzed.
        if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
          unsigned OtherIdx = !UI.getOperandNo();
          Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
          if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L))
            continue;
        }

        LSRFixup &LF = getNewFixup();
        LF.UserInst = const_cast<Instruction *>(UserInst);
        LF.OperandValToReplace = UI.getUse();
        std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, 0);
        LF.LUIdx = P.first;
        LF.Offset = P.second;
        LSRUse &LU = Uses[LF.LUIdx];
        LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
        if (!LU.WidestFixupType ||
            SE.getTypeSizeInBits(LU.WidestFixupType) <
            SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
          LU.WidestFixupType = LF.OperandValToReplace->getType();
        InsertSupplementalFormula(U, LU, LF.LUIdx);
        CountRegisters(LU.Formulae.back(), Uses.size() - 1);
        break;
      }
    }
  }
}

/// CollectSubexprs - Split S into subexpressions which can be pulled out into
/// separate registers. If C is non-null, multiply each subexpression by C.
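///
/// For example, (A + B + (2 * C)) splits into the subexpressions A, B, and
/// 2*C, and an addrec {Start,+,Step} with a non-zero start splits into
/// Start and {0,+,Step}.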
static void CollectSubexprs(const SCEV *S, const SCEVConstant *C,
                            SmallVectorImpl<const SCEV *> &Ops,
                            const Loop *L,
                            ScalarEvolution &SE) {
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    // Break out add operands.
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      CollectSubexprs(*I, C, Ops, L, SE);
    return;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    // Split a non-zero base out of an addrec.
    if (!AR->getStart()->isZero()) {
      CollectSubexprs(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                       AR->getStepRecurrence(SE),
                                       AR->getLoop(),
                                       //FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                                       SCEV::FlagAnyWrap),
                      C, Ops, L, SE);
      CollectSubexprs(AR->getStart(), C, Ops, L, SE);
      return;
    }
  } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    // Break (C * (a + b + c)) into C*a + C*b + C*c.
    if (Mul->getNumOperands() == 2)
      if (const SCEVConstant *Op0 =
            dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
        CollectSubexprs(Mul->getOperand(1),
                        C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0,
                        Ops, L, SE);
        return;
      }
  }

  // Otherwise use the value itself, optionally with a scale applied.
  Ops.push_back(C ? SE.getMulExpr(C, S) : S);
}

/// GenerateReassociations - Split out subexpressions from adds and the bases of
/// addrecs.
void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
                                         Formula Base,
                                         unsigned Depth) {
  // Arbitrarily cap recursion to protect compile time.
  if (Depth >= 3) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *BaseReg = Base.BaseRegs[i];

    SmallVector<const SCEV *, 8> AddOps;
    CollectSubexprs(BaseReg, 0, AddOps, L, SE);

    if (AddOps.size() == 1) continue;

    for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
         JE = AddOps.end(); J != JE; ++J) {

      // Loop-variant "unknown" values are uninteresting; we won't be able to
      // do anything meaningful with them.
      if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L))
        continue;

      // Don't pull a constant into a register if the constant could be folded
      // into an immediate field.
      if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset,
                           Base.getNumRegs() > 1,
                           LU.Kind, LU.AccessTy, TLI, SE))
        continue;

      // Collect all operands except *J.
      SmallVector<const SCEV *, 8> InnerAddOps
        (((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
      InnerAddOps.append
        (llvm::next(J), ((const SmallVector<const SCEV *, 8> &)AddOps).end());

      // Don't leave just a constant behind in a register if the constant could
      // be folded into an immediate field.
      if (InnerAddOps.size() == 1 &&
          isAlwaysFoldable(InnerAddOps[0], LU.MinOffset, LU.MaxOffset,
                           Base.getNumRegs() > 1,
                           LU.Kind, LU.AccessTy, TLI, SE))
        continue;

      const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
      if (InnerSum->isZero())
        continue;
      Formula F = Base;

      // Add the remaining pieces of the add back into the new formula.
      const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum);
      if (TLI && InnerSumSC &&
          SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 &&
          TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
                                   InnerSumSC->getValue()->getZExtValue())) {
        F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset +
                           InnerSumSC->getValue()->getZExtValue();
        F.BaseRegs.erase(F.BaseRegs.begin() + i);
      } else
        F.BaseRegs[i] = InnerSum;

      // Add J as its own register, or an unfolded immediate.
      const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J);
      if (TLI && SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
          TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
                                   SC->getValue()->getZExtValue()))
        F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset +
                           SC->getValue()->getZExtValue();
      else
        F.BaseRegs.push_back(*J);

      if (InsertFormula(LU, LUIdx, F))
        // If that formula hadn't been seen before, recurse to find more like
        // it.
        GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth+1);
    }
  }
}

/// GenerateCombinations - Generate a formula consisting of all of the
/// loop-dominating registers added into a single register.
void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
                                       Formula Base) {
  // This method is only interesting on a plurality of registers.
  if (Base.BaseRegs.size() <= 1) return;

  Formula F = Base;
  F.BaseRegs.clear();
  SmallVector<const SCEV *, 4> Ops;
  for (SmallVectorImpl<const SCEV *>::const_iterator
       I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (SE.properlyDominates(BaseReg, L->getHeader()) &&
        !SE.hasComputableLoopEvolution(BaseReg, L))
      Ops.push_back(BaseReg);
    else
      F.BaseRegs.push_back(BaseReg);
  }
  if (Ops.size() > 1) {
    const SCEV *Sum = SE.getAddExpr(Ops);
    // TODO: If Sum is zero, it probably means ScalarEvolution missed an
    // opportunity to fold something. For now, just ignore such cases
    // rather than proceed with zero in a register.
    if (!Sum->isZero()) {
      F.BaseRegs.push_back(Sum);
      (void)InsertFormula(LU, LUIdx, F);
    }
  }
}

/// GenerateSymbolicOffsets - Generate reuse formulae using symbolic offsets.
void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // We can't add a symbolic offset if the address already contains one.
  if (Base.AM.BaseGV) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *G = Base.BaseRegs[i];
    GlobalValue *GV = ExtractSymbol(G, SE);
    if (G->isZero() || !GV)
      continue;
    Formula F = Base;
    F.AM.BaseGV = GV;
    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI))
      continue;
    F.BaseRegs[i] = G;
    (void)InsertFormula(LU, LUIdx, F);
  }
}

/// GenerateConstantOffsets - Generate reuse formulae using constant offsets.
void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // TODO: For now, just add the min and max offset, because it usually isn't
  // worthwhile looking at everything in between.
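  // For example, for a use with MinOffset 0 and MaxOffset 64, rebasing is
  // attempted only at 0 and 64, not at every offset in between.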
  SmallVector<int64_t, 2> Worklist;
  Worklist.push_back(LU.MinOffset);
  if (LU.MaxOffset != LU.MinOffset)
    Worklist.push_back(LU.MaxOffset);

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *G = Base.BaseRegs[i];

    for (SmallVectorImpl<int64_t>::const_iterator I = Worklist.begin(),
         E = Worklist.end(); I != E; ++I) {
      Formula F = Base;
      F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I;
      if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I,
                     LU.Kind, LU.AccessTy, TLI)) {
        // Add the offset to the base register.
        const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G);
        // If it cancelled out, drop the base register, otherwise update it.
        if (NewG->isZero()) {
          std::swap(F.BaseRegs[i], F.BaseRegs.back());
          F.BaseRegs.pop_back();
        } else
          F.BaseRegs[i] = NewG;

        (void)InsertFormula(LU, LUIdx, F);
      }
    }

    int64_t Imm = ExtractImmediate(G, SE);
    if (G->isZero() || Imm == 0)
      continue;
    Formula F = Base;
    F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Imm;
    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI))
      continue;
    F.BaseRegs[i] = G;
    (void)InsertFormula(LU, LUIdx, F);
  }
}

/// GenerateICmpZeroScales - For ICmpZero, check to see if we can scale up
/// the comparison. For example, x == y -> x*c == y*c.
void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
                                         Formula Base) {
  if (LU.Kind != LSRUse::ICmpZero) return;

  // Determine the integer type for the base formula.
  Type *IntTy = Base.getType();
  if (!IntTy) return;
  if (SE.getTypeSizeInBits(IntTy) > 64) return;

  // Don't do this if there is more than one offset.
  if (LU.MinOffset != LU.MaxOffset) return;

  assert(!Base.AM.BaseGV && "ICmpZero use is not legal!");

  // Check each interesting stride.
  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    int64_t Factor = *I;

    // Check that the multiplication doesn't overflow.
    if (Base.AM.BaseOffs == INT64_MIN && Factor == -1)
      continue;
    int64_t NewBaseOffs = (uint64_t)Base.AM.BaseOffs * Factor;
    if (NewBaseOffs / Factor != Base.AM.BaseOffs)
      continue;

    // Check that multiplying with the use offset doesn't overflow.
    int64_t Offset = LU.MinOffset;
    if (Offset == INT64_MIN && Factor == -1)
      continue;
    Offset = (uint64_t)Offset * Factor;
    if (Offset / Factor != LU.MinOffset)
      continue;

    Formula F = Base;
    F.AM.BaseOffs = NewBaseOffs;

    // Check that this scale is legal.
    if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI))
      continue;

    // Compensate for the use having MinOffset built into it.
    F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Offset - LU.MinOffset;

    const SCEV *FactorS = SE.getConstant(IntTy, Factor);

    // Check that multiplying with each base register doesn't overflow.
    for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
      F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
      if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
        goto next;
    }

    // Check that multiplying with the scaled register doesn't overflow.
    if (F.ScaledReg) {
      F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
      if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
        continue;
    }

    // Check that multiplying with the unfolded offset doesn't overflow.
    if (F.UnfoldedOffset != 0) {
      if (F.UnfoldedOffset == INT64_MIN && Factor == -1)
        continue;
      F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor;
      if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset)
        continue;
    }

    // If we make it here and it's legal, add it.
    (void)InsertFormula(LU, LUIdx, F);
  next:;
  }
}

/// GenerateScales - Generate stride factor reuse formulae by making use of
/// scaled-offset address modes, for example.
void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
  // Determine the integer type for the base formula.
  Type *IntTy = Base.getType();
  if (!IntTy) return;

  // If this Formula already has a scaled register, we can't add another one.
  if (Base.AM.Scale != 0) return;

  // Check each interesting stride.
  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    int64_t Factor = *I;

    Base.AM.Scale = Factor;
    Base.AM.HasBaseReg = Base.BaseRegs.size() > 1;
    // Check whether this scale is going to be legal.
    if (!isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI)) {
      // As a special case, handle out-of-loop Basic users under the Special
      // use kind.
      // TODO: Reconsider this special case.
      if (LU.Kind == LSRUse::Basic &&
          isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
                     LSRUse::Special, LU.AccessTy, TLI) &&
          LU.AllFixupsOutsideLoop)
        LU.Kind = LSRUse::Special;
      else
        continue;
    }
    // For an ICmpZero, negating a solitary base register won't lead to
    // new solutions.
    if (LU.Kind == LSRUse::ICmpZero &&
        !Base.AM.HasBaseReg && Base.AM.BaseOffs == 0 && !Base.AM.BaseGV)
      continue;
    // For each addrec base reg, apply the scale, if possible.
    for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
      if (const SCEVAddRecExpr *AR =
            dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i])) {
        const SCEV *FactorS = SE.getConstant(IntTy, Factor);
        if (FactorS->isZero())
          continue;
        // Divide out the factor, ignoring high bits, since we'll be
        // scaling the value back up in the end.
        if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) {
          // TODO: This could be optimized to avoid all the copying.
          Formula F = Base;
          F.ScaledReg = Quotient;
          F.DeleteBaseReg(F.BaseRegs[i]);
          (void)InsertFormula(LU, LUIdx, F);
        }
      }
  }
}

/// GenerateTruncates - Generate reuse formulae from different IV types.
void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
  // This requires TargetLowering to tell us which truncates are free.
  if (!TLI) return;

  // Don't bother truncating symbolic values.
  if (Base.AM.BaseGV) return;

  // Determine the integer type for the base formula.
  Type *DstTy = Base.getType();
  if (!DstTy) return;
  DstTy = SE.getEffectiveSCEVType(DstTy);

  for (SmallSetVector<Type *, 4>::const_iterator
       I = Types.begin(), E = Types.end(); I != E; ++I) {
    Type *SrcTy = *I;
    if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) {
      Formula F = Base;

      if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I);
      for (SmallVectorImpl<const SCEV *>::iterator J = F.BaseRegs.begin(),
           JE = F.BaseRegs.end(); J != JE; ++J)
        *J = SE.getAnyExtendExpr(*J, SrcTy);

      // TODO: This assumes we've done basic processing on all uses and
      // have an idea what the register usage is.
      if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses))
        continue;

      (void)InsertFormula(LU, LUIdx, F);
    }
  }
}

namespace {

/// WorkItem - Helper class for GenerateCrossUseConstantOffsets. It's used to
/// defer modifications so that the search phase doesn't have to worry about
/// the data structures moving underneath it.
struct WorkItem {
  size_t LUIdx;
  int64_t Imm;
  const SCEV *OrigReg;

  WorkItem(size_t LI, int64_t I, const SCEV *R)
    : LUIdx(LI), Imm(I), OrigReg(R) {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

void WorkItem::print(raw_ostream &OS) const {
  OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
     << ", add offset " << Imm;
}

void WorkItem::dump() const {
  print(errs()); errs() << '\n';
}

/// GenerateCrossUseConstantOffsets - Look for registers which are a constant
/// distance apart and try to form reuse opportunities between them.
void LSRInstance::GenerateCrossUseConstantOffsets() {
  // Group the registers by their value without any added constant offset.
  typedef std::map<int64_t, const SCEV *> ImmMapTy;
  typedef DenseMap<const SCEV *, ImmMapTy> RegMapTy;
  RegMapTy Map;
  DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
  SmallVector<const SCEV *, 8> Sequence;
  for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
       I != E; ++I) {
    const SCEV *Reg = *I;
    int64_t Imm = ExtractImmediate(Reg, SE);
    std::pair<RegMapTy::iterator, bool> Pair =
      Map.insert(std::make_pair(Reg, ImmMapTy()));
    if (Pair.second)
      Sequence.push_back(Reg);
    Pair.first->second.insert(std::make_pair(Imm, *I));
    UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(*I);
  }

  // Now examine each set of registers with the same base value. Build up
  // a list of work to do and do the work in a separate step so that we're
  // not adding formulae and register counts while we're searching.
  SmallVector<WorkItem, 32> WorkItems;
  SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = Sequence.begin(),
       E = Sequence.end(); I != E; ++I) {
    const SCEV *Reg = *I;
    const ImmMapTy &Imms = Map.find(Reg)->second;

    // It's not worthwhile looking for reuse if there's only one offset.
    if (Imms.size() == 1)
      continue;

    DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
          for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
               J != JE; ++J)
            dbgs() << ' ' << J->first;
          dbgs() << '\n');

    // Examine each offset.
    for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
         J != JE; ++J) {
      const SCEV *OrigReg = J->second;

      int64_t JImm = J->first;
      const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);

      if (!isa<SCEVConstant>(OrigReg) &&
          UsedByIndicesMap[Reg].count() == 1) {
        DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg << '\n');
        continue;
      }

      // Conservatively examine offsets between this orig reg and a few
      // selected other orig regs.
      ImmMapTy::const_iterator OtherImms[] = {
        Imms.begin(), prior(Imms.end()),
        Imms.lower_bound((Imms.begin()->first + prior(Imms.end())->first) / 2)
      };
      for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
        ImmMapTy::const_iterator M = OtherImms[i];
        if (M == J || M == JE) continue;

        // Compute the difference between the two.
        int64_t Imm = (uint64_t)JImm - M->first;
        for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1;
             LUIdx = UsedByIndices.find_next(LUIdx))
          // Make a memo of this use, offset, and register tuple.
          if (UniqueItems.insert(std::make_pair(LUIdx, Imm)))
            WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
      }
    }
  }

  Map.clear();
  Sequence.clear();
  UsedByIndicesMap.clear();
  UniqueItems.clear();

  // Now iterate through the worklist and add new formulae.
  for (SmallVectorImpl<WorkItem>::const_iterator I = WorkItems.begin(),
       E = WorkItems.end(); I != E; ++I) {
    const WorkItem &WI = *I;
    size_t LUIdx = WI.LUIdx;
    LSRUse &LU = Uses[LUIdx];
    int64_t Imm = WI.Imm;
    const SCEV *OrigReg = WI.OrigReg;

    Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
    const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
    unsigned BitWidth = SE.getTypeSizeInBits(IntTy);

    // TODO: Use a more targeted data structure.
    for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
      const Formula &F = LU.Formulae[L];
      // Use the immediate in the scaled register.
      if (F.ScaledReg == OrigReg) {
        int64_t Offs = (uint64_t)F.AM.BaseOffs +
                       Imm * (uint64_t)F.AM.Scale;
        // Don't create 50 + reg(-50).
        if (F.referencesReg(SE.getSCEV(
                              ConstantInt::get(IntTy, -(uint64_t)Offs))))
          continue;
        Formula NewF = F;
        NewF.AM.BaseOffs = Offs;
        if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
                        LU.Kind, LU.AccessTy, TLI))
          continue;
        NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);

        // If the new scale is a constant in a register, and adding the
        // constant value to the immediate would produce a value closer to
        // zero than the immediate itself, then the formula isn't worthwhile.
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg))
          if (C->getValue()->isNegative() !=
                (NewF.AM.BaseOffs < 0) &&
              (C->getValue()->getValue().abs() * APInt(BitWidth, F.AM.Scale))
                .ule(abs64(NewF.AM.BaseOffs)))
            continue;

        // OK, looks good.
        (void)InsertFormula(LU, LUIdx, NewF);
      } else {
        // Use the immediate in a base register.
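        // For example, if OrigReg is (a+8) and the same base is also tracked
        // as plain a (so Imm is 8), rewrite the formula to reference a and
        // fold the 8 into the immediate field, if that's legal.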
        for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) {
          const SCEV *BaseReg = F.BaseRegs[N];
          if (BaseReg != OrigReg)
            continue;
          Formula NewF = F;
          NewF.AM.BaseOffs = (uint64_t)NewF.AM.BaseOffs + Imm;
          if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
                          LU.Kind, LU.AccessTy, TLI)) {
            if (!TLI ||
                !TLI->isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
              continue;
            NewF = F;
            NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm;
          }
          NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg);

          // If the new formula has a constant in a register, and adding the
          // constant value to the immediate would produce a value closer to
          // zero than the immediate itself, then the formula isn't worthwhile.
          for (SmallVectorImpl<const SCEV *>::const_iterator
               J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end();
               J != JE; ++J)
            if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J))
              if ((C->getValue()->getValue() + NewF.AM.BaseOffs).abs().slt(
                    abs64(NewF.AM.BaseOffs)) &&
                  (C->getValue()->getValue() +
                   NewF.AM.BaseOffs).countTrailingZeros() >=
                  CountTrailingZeros_64(NewF.AM.BaseOffs))
                goto skip_formula;

          // OK, looks good.
          (void)InsertFormula(LU, LUIdx, NewF);
          break;
        skip_formula:;
        }
      }
    }
  }
}

/// GenerateAllReuseFormulae - Generate formulae for each use.
void
LSRInstance::GenerateAllReuseFormulae() {
  // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
  // queries are more precise.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateScales(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
  }

  GenerateCrossUseConstantOffsets();

  DEBUG(dbgs() << "\n"
                  "After generating reuse formulae:\n";
        print_uses(dbgs()));
}

/// FilterOutUndesirableDedicatedRegisters - If there are multiple formulae
/// with the same set of registers used by other uses, pick the best one and
/// delete the others.
void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
  DenseSet<const SCEV *> VisitedRegs;
  SmallPtrSet<const SCEV *, 16> Regs;
#ifndef NDEBUG
  bool ChangedFormulae = false;
#endif

  // Collect the best formula for each unique set of shared registers. This
  // is reset for each use.
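  // Formulae that collide on this key differ only in registers private to
  // this use, so, as a heuristic, only the cheaper of each colliding pair
  // is kept.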
  typedef DenseMap<SmallVector<const SCEV *, 2>, size_t, UniquifierDenseMapInfo>
    BestFormulaeTy;
  BestFormulaeTy BestFormulae;

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n');

    bool Any = false;
    for (size_t FIdx = 0, NumForms = LU.Formulae.size();
         FIdx != NumForms; ++FIdx) {
      Formula &F = LU.Formulae[FIdx];

      SmallVector<const SCEV *, 2> Key;
      for (SmallVectorImpl<const SCEV *>::const_iterator J = F.BaseRegs.begin(),
           JE = F.BaseRegs.end(); J != JE; ++J) {
        const SCEV *Reg = *J;
        if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
          Key.push_back(Reg);
      }
      if (F.ScaledReg &&
          RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
        Key.push_back(F.ScaledReg);
      // Unstable sort by host order OK, because this is only used for
      // uniquifying.
      std::sort(Key.begin(), Key.end());

      std::pair<BestFormulaeTy::const_iterator, bool> P =
        BestFormulae.insert(std::make_pair(Key, FIdx));
      if (!P.second) {
        Formula &Best = LU.Formulae[P.first->second];

        Cost CostF;
        CostF.RateFormula(F, Regs, VisitedRegs, L, LU.Offsets, SE, DT);
        Regs.clear();
        Cost CostBest;
        CostBest.RateFormula(Best, Regs, VisitedRegs, L, LU.Offsets, SE, DT);
        Regs.clear();
        if (CostF < CostBest)
          std::swap(F, Best);
        DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
              dbgs() << "\n"
                        "    in favor of formula "; Best.print(dbgs());
              dbgs() << '\n');
#ifndef NDEBUG
        ChangedFormulae = true;
#endif
        LU.DeleteFormula(F);
        --FIdx;
        --NumForms;
        Any = true;
        continue;
      }
    }

    // Now that we've filtered out some formulae, recompute the Regs set.
    if (Any)
      LU.RecomputeRegs(LUIdx, RegUses);

    // Reset this to prepare for the next use.
    BestFormulae.clear();
  }

  DEBUG(if (ChangedFormulae) {
          dbgs() << "\n"
                    "After filtering out undesirable candidates:\n";
          print_uses(dbgs());
        });
}

// This is a rough guess that seems to work fairly well.
static const size_t ComplexityLimit = UINT16_MAX;

/// EstimateSearchSpaceComplexity - Estimate the worst-case number of
/// solutions the solver might have to consider. It almost never considers
/// this many solutions because it prunes the search space, but the pruning
/// isn't always sufficient.
size_t LSRInstance::EstimateSearchSpaceComplexity() const {
  size_t Power = 1;
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    size_t FSize = I->Formulae.size();
    if (FSize >= ComplexityLimit) {
      Power = ComplexityLimit;
      break;
    }
    Power *= FSize;
    if (Power >= ComplexityLimit)
      break;
  }
  return Power;
}

/// NarrowSearchSpaceByDetectingSupersets - When one formula uses a superset
/// of the registers of another formula, it won't help reduce register
/// pressure (though it may not necessarily hurt register pressure); remove
/// it to simplify the system.
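///
/// For example, a formula with base registers {reg(a), reg(5)} is deleted
/// when the same use also has a formula using just {reg(a)} with the 5
/// folded into the immediate offset.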
void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    DEBUG(dbgs() << "The search space is too complex.\n");

    DEBUG(dbgs() << "Narrowing the search space by eliminating formulae "
                    "which use a superset of registers used by other "
                    "formulae.\n");

    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      bool Any = false;
      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        // Look for a formula with a constant or GV in a register. If the use
        // also has a formula with that same value in an immediate field,
        // delete the one that uses a register.
        for (SmallVectorImpl<const SCEV *>::const_iterator
             I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) {
          if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) {
            Formula NewF = F;
            NewF.AM.BaseOffs += C->getValue()->getSExtValue();
            NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
                                (I - F.BaseRegs.begin()));
            if (LU.HasFormulaWithSameRegs(NewF)) {
              DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
              LU.DeleteFormula(F);
              --i;
              --e;
              Any = true;
              break;
            }
          } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) {
            if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue()))
              if (!F.AM.BaseGV) {
                Formula NewF = F;
                NewF.AM.BaseGV = GV;
                NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
                                    (I - F.BaseRegs.begin()));
                if (LU.HasFormulaWithSameRegs(NewF)) {
                  DEBUG(dbgs() << "  Deleting "; F.print(dbgs());
                        dbgs() << '\n');
                  LU.DeleteFormula(F);
                  --i;
                  --e;
                  Any = true;
                  break;
                }
              }
          }
        }
      }
      if (Any)
        LU.RecomputeRegs(LUIdx, RegUses);
    }

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}

/// NarrowSearchSpaceByCollapsingUnrolledCode - When there are many registers
/// for expressions like A, A+1, A+2, etc., allocate a single register for
/// them.
void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    DEBUG(dbgs() << "The search space is too complex.\n");

    DEBUG(dbgs() << "Narrowing the search space by assuming that uses "
                    "separated by a constant offset will use the same "
                    "registers.\n");

    // This is especially useful for unrolled loops.

    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
           E = LU.Formulae.end(); I != E; ++I) {
        const Formula &F = *I;
        if (F.AM.BaseOffs != 0 && F.AM.Scale == 0) {
          if (LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU)) {
            if (reconcileNewOffset(*LUThatHas, F.AM.BaseOffs,
                                   /*HasBaseReg=*/false,
                                   LU.Kind, LU.AccessTy)) {
              DEBUG(dbgs() << "  Deleting use "; LU.print(dbgs());
                    dbgs() << '\n');

              LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;

              // Update the relocs to reference the new use.
              for (SmallVectorImpl<LSRFixup>::iterator I = Fixups.begin(),
                   E = Fixups.end(); I != E; ++I) {
                LSRFixup &Fixup = *I;
                if (Fixup.LUIdx == LUIdx) {
                  Fixup.LUIdx = LUThatHas - &Uses.front();
                  Fixup.Offset += F.AM.BaseOffs;
                  // Add the new offset to LUThatHas' offset list.
                  if (LUThatHas->Offsets.back() != Fixup.Offset) {
                    LUThatHas->Offsets.push_back(Fixup.Offset);
                    if (Fixup.Offset > LUThatHas->MaxOffset)
                      LUThatHas->MaxOffset = Fixup.Offset;
                    if (Fixup.Offset < LUThatHas->MinOffset)
                      LUThatHas->MinOffset = Fixup.Offset;
                  }
                  DEBUG(dbgs() << "New fixup has offset "
                               << Fixup.Offset << '\n');
                }
                if (Fixup.LUIdx == NumUses-1)
                  Fixup.LUIdx = LUIdx;
              }

              // Delete formulae from the new use which are no longer legal.
              bool Any = false;
              for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) {
                Formula &F = LUThatHas->Formulae[i];
                if (!isLegalUse(F.AM,
                                LUThatHas->MinOffset, LUThatHas->MaxOffset,
                                LUThatHas->Kind, LUThatHas->AccessTy, TLI)) {
                  DEBUG(dbgs() << "  Deleting "; F.print(dbgs());
                        dbgs() << '\n');
                  LUThatHas->DeleteFormula(F);
                  --i;
                  --e;
                  Any = true;
                }
              }
              if (Any)
                LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses);

              // Delete the old use.
              DeleteUse(LU, LUIdx);
              --LUIdx;
              --NumUses;
              break;
            }
          }
        }
      }
    }

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}

/// NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters - Call
/// FilterOutUndesirableDedicatedRegisters again, if necessary, now that
/// we've done more filtering, as it may be able to find more formulae to
/// eliminate.
void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    DEBUG(dbgs() << "The search space is too complex.\n");

    DEBUG(dbgs() << "Narrowing the search space by re-filtering out "
                    "undesirable dedicated registers.\n");

    FilterOutUndesirableDedicatedRegisters();

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}

/// NarrowSearchSpaceByPickingWinnerRegs - Pick a register which seems likely
/// to be profitable, and then in any use which has any reference to that
/// register, delete all formulae which do not reference that register.
void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
  // With all other options exhausted, loop until the system is simple
  // enough to handle.
  SmallPtrSet<const SCEV *, 4> Taken;
  while (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    // OK, we have too many formulae on our hands to conveniently handle.
    // Use a rough heuristic to thin out the list.
    DEBUG(dbgs() << "The search space is too complex.\n");

    // Pick the register which is used by the most LSRUses, which is likely
    // to be a good reuse register candidate.
    const SCEV *Best = 0;
    unsigned BestNum = 0;
    for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
         I != E; ++I) {
      const SCEV *Reg = *I;
      if (Taken.count(Reg))
        continue;
      if (!Best) {
        Best = Reg;
        BestNum = RegUses.getUsedByIndices(Reg).count();
      } else {
        unsigned Count = RegUses.getUsedByIndices(Reg).count();
        if (Count > BestNum) {
          Best = Reg;
          BestNum = Count;
        }
      }
    }

    DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
                 << " will yield profitable reuse.\n");
    Taken.insert(Best);

    // In any use with formulae which reference this register, delete formulae
    // which don't reference it.
    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      if (!LU.Regs.count(Best)) continue;

      bool Any = false;
      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        if (!F.referencesReg(Best)) {
          DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
          LU.DeleteFormula(F);
          --e;
          --i;
          Any = true;
          assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?");
          continue;
        }
      }

      if (Any)
        LU.RecomputeRegs(LUIdx, RegUses);
    }

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}

/// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of
/// formulae to choose from, use some rough heuristics to prune down the number
/// of formulae. This keeps the main solver from taking an extraordinary amount
/// of time in some worst-case scenarios.
void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
  NarrowSearchSpaceByDetectingSupersets();
  NarrowSearchSpaceByCollapsingUnrolledCode();
  NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  NarrowSearchSpaceByPickingWinnerRegs();
}

/// SolveRecurse - This is the recursive solver.
void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                               Cost &SolutionCost,
                               SmallVectorImpl<const Formula *> &Workspace,
                               const Cost &CurCost,
                               const SmallPtrSet<const SCEV *, 16> &CurRegs,
                               DenseSet<const SCEV *> &VisitedRegs) const {
  // Some ideas:
  //  - prune more:
  //    - use more aggressive filtering
  //    - sort the formulae so that the most profitable solutions are found
  //      first
  //    - sort the uses too
  //  - search faster:
  //    - don't compute a cost, and then compare; compare while computing a
  //      cost, and bail early
  //    - track register sets with SmallBitVector

  const LSRUse &LU = Uses[Workspace.size()];

  // If this use references any register that's already a part of the
  // in-progress solution, consider it a requirement that a formula must
  // reference that register in order to be considered. This prunes out
  // unprofitable searching.
  SmallSetVector<const SCEV *, 4> ReqRegs;
  for (SmallPtrSet<const SCEV *, 16>::const_iterator I = CurRegs.begin(),
       E = CurRegs.end(); I != E; ++I)
    if (LU.Regs.count(*I))
      ReqRegs.insert(*I);

  bool AnySatisfiedReqRegs = false;
  SmallPtrSet<const SCEV *, 16> NewRegs;
  Cost NewCost;
retry:
  for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
       E = LU.Formulae.end(); I != E; ++I) {
    const Formula &F = *I;

    // Ignore formulae which do not use any of the required registers.
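    // For example (a sketch), if %a is already in CurRegs and this use has
    // formulae reg(%a) + reg(%b) and reg(%c), only the former survives this
    // check; requiring reuse of %a avoids searching solutions that pay for
    // %c as well.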
    for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(),
         JE = ReqRegs.end(); J != JE; ++J) {
      const SCEV *Reg = *J;
      if ((!F.ScaledReg || F.ScaledReg != Reg) &&
          std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) ==
          F.BaseRegs.end())
        goto skip;
    }
    AnySatisfiedReqRegs = true;

    // Evaluate the cost of the current formula. If it's already worse than
    // the current best, prune the search at that point.
    NewCost = CurCost;
    NewRegs = CurRegs;
    NewCost.RateFormula(F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT);
    if (NewCost < SolutionCost) {
      Workspace.push_back(&F);
      if (Workspace.size() != Uses.size()) {
        SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
                     NewRegs, VisitedRegs);
        if (F.getNumRegs() == 1 && Workspace.size() == 1)
          VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
      } else {
        DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
              dbgs() << ". Regs:";
              for (SmallPtrSet<const SCEV *, 16>::const_iterator
                   I = NewRegs.begin(), E = NewRegs.end(); I != E; ++I)
                dbgs() << ' ' << **I;
              dbgs() << '\n');

        SolutionCost = NewCost;
        Solution = Workspace;
      }
      Workspace.pop_back();
    }
  skip:;
  }

  if (!EnableRetry && !AnySatisfiedReqRegs)
    return;

  // If none of the formulae had all of the required registers, relax the
  // constraint so that we don't exclude all formulae.
  if (!AnySatisfiedReqRegs) {
    assert(!ReqRegs.empty() && "Solver failed even without required registers");
    ReqRegs.clear();
    goto retry;
  }
}

/// Solve - Choose one formula from each use. Return the results in the given
/// Solution vector.
void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
  SmallVector<const Formula *, 8> Workspace;
  Cost SolutionCost;
  SolutionCost.Loose();
  Cost CurCost;
  SmallPtrSet<const SCEV *, 16> CurRegs;
  DenseSet<const SCEV *> VisitedRegs;
  Workspace.reserve(Uses.size());

  // SolveRecurse does all the work.
  SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
               CurRegs, VisitedRegs);
  if (Solution.empty()) {
    DEBUG(dbgs() << "\nNo Satisfactory Solution\n");
    return;
  }

  // Ok, we've now made all our decisions.
  DEBUG(dbgs() << "\n"
                  "The chosen solution requires "; SolutionCost.print(dbgs());
        dbgs() << ":\n";
        for (size_t i = 0, e = Uses.size(); i != e; ++i) {
          dbgs() << "  ";
          Uses[i].print(dbgs());
          dbgs() << "\n"
                    "    ";
          Solution[i]->print(dbgs());
          dbgs() << '\n';
        });

  assert(Solution.size() == Uses.size() && "Malformed solution!");
}

/// HoistInsertPosition - Helper for AdjustInsertPositionForExpand. Climb up
/// the dominator tree as far as we can go while still being dominated by the
/// input positions. This helps canonicalize the insert position, which
/// encourages sharing.
BasicBlock::iterator
LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
                                 const SmallVectorImpl<Instruction *> &Inputs)
                                 const {
  for (;;) {
    const Loop *IPLoop = LI.getLoopFor(IP->getParent());
    unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0;

    BasicBlock *IDom;
    for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) {
      if (!Rung) return IP;
      Rung = Rung->getIDom();
      if (!Rung) return IP;
      IDom = Rung->getBlock();

      // Don't climb into a loop though.
      const Loop *IDomLoop = LI.getLoopFor(IDom);
      unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0;
      if (IDomDepth <= IPLoopDepth &&
          (IDomDepth != IPLoopDepth || IDomLoop == IPLoop))
        break;
    }

    bool AllDominate = true;
    Instruction *BetterPos = 0;
    Instruction *Tentative = IDom->getTerminator();
    for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(),
         E = Inputs.end(); I != E; ++I) {
      Instruction *Inst = *I;
      if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
        AllDominate = false;
        break;
      }
      // Attempt to find an insert position in the middle of the block,
      // instead of at the end, so that it can be used for other expansions.
      if (IDom == Inst->getParent() &&
          (!BetterPos || DT.dominates(BetterPos, Inst)))
        BetterPos = llvm::next(BasicBlock::iterator(Inst));
    }
    if (!AllDominate)
      break;
    if (BetterPos)
      IP = BetterPos;
    else
      IP = Tentative;
  }

  return IP;
}

/// AdjustInsertPositionForExpand - Determine an insert position which will be
/// dominated by the operands and which will dominate the result.
BasicBlock::iterator
LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator IP,
                                           const LSRFixup &LF,
                                           const LSRUse &LU) const {
  // Collect some instructions which must dominate the expanded replacement:
  // any operands that will be required in the expansion.
  SmallVector<Instruction *, 4> Inputs;
  if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
    Inputs.push_back(I);
  if (LU.Kind == LSRUse::ICmpZero)
    if (Instruction *I =
          dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
      Inputs.push_back(I);
  if (LF.PostIncLoops.count(L)) {
    if (LF.isUseFullyOutsideLoop(L))
      Inputs.push_back(L->getLoopLatch()->getTerminator());
    else
      Inputs.push_back(IVIncInsertPos);
  }
  // The expansion must also be dominated by the increment positions of any
  // loops for which it is using post-inc mode.
  for (PostIncLoopSet::const_iterator I = LF.PostIncLoops.begin(),
       E = LF.PostIncLoops.end(); I != E; ++I) {
    const Loop *PIL = *I;
    if (PIL == L) continue;

    // Be dominated by the loop exit.
    SmallVector<BasicBlock *, 4> ExitingBlocks;
    PIL->getExitingBlocks(ExitingBlocks);
    if (!ExitingBlocks.empty()) {
      BasicBlock *BB = ExitingBlocks[0];
      for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i)
        BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]);
      Inputs.push_back(BB->getTerminator());
    }
  }

  // Then, climb up the immediate dominator tree as far as we can go while
  // still being dominated by the input positions.
  IP = HoistInsertPosition(IP, Inputs);

  // Don't insert instructions before PHI nodes.
  while (isa<PHINode>(IP)) ++IP;

  // Ignore landingpad instructions.
  while (isa<LandingPadInst>(IP)) ++IP;

  // Ignore debug intrinsics.
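  // (Stepping past them keeps the insert position independent of whether
  // debug info is present, so building with -g does not perturb the code
  // LSR generates here.)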
  while (isa<DbgInfoIntrinsic>(IP)) ++IP;

  return IP;
}

/// Expand - Emit instructions for the leading candidate expression for this
/// LSRUse (this is called "expanding").
Value *LSRInstance::Expand(const LSRFixup &LF,
                           const Formula &F,
                           BasicBlock::iterator IP,
                           SCEVExpander &Rewriter,
                           SmallVectorImpl<WeakVH> &DeadInsts) const {
  const LSRUse &LU = Uses[LF.LUIdx];

  // Determine an insert position which will be dominated by the operands and
  // which will dominate the result.
  IP = AdjustInsertPositionForExpand(IP, LF, LU);

  // Inform the Rewriter if we have a post-increment use, so that it can
  // perform an advantageous expansion.
  Rewriter.setPostInc(LF.PostIncLoops);

  // This is the type that the user actually needs.
  Type *OpTy = LF.OperandValToReplace->getType();
  // This will be the type that we'll initially expand to.
  Type *Ty = F.getType();
  if (!Ty)
    // No type known; just expand directly to the ultimate type.
    Ty = OpTy;
  else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy))
    // Expand directly to the ultimate type if it's the right size.
    Ty = OpTy;
  // This is the type to do integer arithmetic in.
  Type *IntTy = SE.getEffectiveSCEVType(Ty);

  // Build up a list of operands to add together to form the full base.
  SmallVector<const SCEV *, 8> Ops;

  // Expand the BaseRegs portion.
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *Reg = *I;
    assert(!Reg->isZero() && "Zero allocated in a base register!");

    // If we're expanding for a post-inc user, make the post-inc adjustment.
    PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
    Reg = TransformForPostIncUse(Denormalize, Reg,
                                 LF.UserInst, LF.OperandValToReplace,
                                 Loops, SE, DT);

    Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP)));
  }

  // Flush the operand list to suppress SCEVExpander hoisting.
  if (!Ops.empty()) {
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }

  // Expand the ScaledReg portion.
  Value *ICmpScaledV = 0;
  if (F.AM.Scale != 0) {
    const SCEV *ScaledS = F.ScaledReg;

    // If we're expanding for a post-inc user, make the post-inc adjustment.
    PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
    ScaledS = TransformForPostIncUse(Denormalize, ScaledS,
                                     LF.UserInst, LF.OperandValToReplace,
                                     Loops, SE, DT);

    if (LU.Kind == LSRUse::ICmpZero) {
      // An interesting way of "folding" with an icmp is to use a negated
      // scale, which we'll implement by inserting it into the other operand
      // of the icmp.
      assert(F.AM.Scale == -1 &&
             "The only scale supported by ICmpZero uses is -1!");
      ICmpScaledV = Rewriter.expandCodeFor(ScaledS, 0, IP);
    } else {
      // Otherwise just expand the scaled register and an explicit scale,
      // which is expected to be matched as part of the address.
      ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP));
      ScaledS = SE.getMulExpr(ScaledS,
                              SE.getConstant(ScaledS->getType(), F.AM.Scale));
      Ops.push_back(ScaledS);

      // Flush the operand list to suppress SCEVExpander hoisting.
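      // (Expanding the partial sum now pins it at IP; if the terms were left
      // symbolic, SCEVExpander could hoist the combined expression to a more
      // dominating point than the position chosen above.)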
      Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
      Ops.clear();
      Ops.push_back(SE.getUnknown(FullV));
    }
  }

  // Expand the GV portion.
  if (F.AM.BaseGV) {
    Ops.push_back(SE.getUnknown(F.AM.BaseGV));

    // Flush the operand list to suppress SCEVExpander hoisting.
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }

  // Expand the immediate portion.
  int64_t Offset = (uint64_t)F.AM.BaseOffs + LF.Offset;
  if (Offset != 0) {
    if (LU.Kind == LSRUse::ICmpZero) {
      // The other interesting way of "folding" with an ICmpZero is to use a
      // negated immediate.
      if (!ICmpScaledV)
        ICmpScaledV = ConstantInt::get(IntTy, -Offset);
      else {
        Ops.push_back(SE.getUnknown(ICmpScaledV));
        ICmpScaledV = ConstantInt::get(IntTy, Offset);
      }
    } else {
      // Just add the immediate values. These again are expected to be matched
      // as part of the address.
      Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset)));
    }
  }

  // Expand the unfolded offset portion.
  int64_t UnfoldedOffset = F.UnfoldedOffset;
  if (UnfoldedOffset != 0) {
    // Just add the immediate values.
    Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy,
                                                       UnfoldedOffset)));
  }

  // Emit instructions summing all the operands.
  const SCEV *FullS = Ops.empty() ?
                      SE.getConstant(IntTy, 0) :
                      SE.getAddExpr(Ops);
  Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP);

  // We're done expanding now, so reset the rewriter.
  Rewriter.clearPostInc();

  // An ICmpZero Formula represents an ICmp which we're handling as a
  // comparison against zero. Now that we've expanded an expression for that
  // form, update the ICmp's other operand.
  if (LU.Kind == LSRUse::ICmpZero) {
    ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
    DeadInsts.push_back(CI->getOperand(1));
    assert(!F.AM.BaseGV && "ICmp does not support folding a global value and "
                           "a scale at the same time!");
    if (F.AM.Scale == -1) {
      if (ICmpScaledV->getType() != OpTy) {
        Instruction *Cast =
          CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false,
                                                   OpTy, false),
                           ICmpScaledV, OpTy, "tmp", CI);
        ICmpScaledV = Cast;
      }
      CI->setOperand(1, ICmpScaledV);
    } else {
      assert(F.AM.Scale == 0 &&
             "The only scales supported by ICmpZero uses are 0 and -1!");
      Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
                                           -(uint64_t)Offset);
      if (C->getType() != OpTy)
        C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                          OpTy, false),
                                  C, OpTy);

      CI->setOperand(1, C);
    }
  }

  return FullV;
}

/// RewriteForPHI - Helper for Rewrite. PHI nodes are special because the use
/// of their operands effectively happens in their predecessor blocks, so the
/// expression may need to be expanded in multiple places.
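///
/// For example (a sketch; names are illustrative):
///
///   %x = phi [ %v, %pred1 ], [ %v, %pred2 ]
///
/// If %v is the operand being replaced, the replacement expression must be
/// expanded once at the end of %pred1 and once at the end of %pred2 (or in
/// blocks created by splitting critical incoming edges), not at the PHI
/// itself.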
void LSRInstance::RewriteForPHI(PHINode *PN,
                                const LSRFixup &LF,
                                const Formula &F,
                                SCEVExpander &Rewriter,
                                SmallVectorImpl<WeakVH> &DeadInsts,
                                Pass *P) const {
  DenseMap<BasicBlock *, Value *> Inserted;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
      BasicBlock *BB = PN->getIncomingBlock(i);

      // If this is a critical edge, split the edge so that we do not insert
      // the code on all predecessor/successor paths. We do this unless this
      // is the canonical backedge for this loop, which complicates post-inc
      // users.
      if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
          !isa<IndirectBrInst>(BB->getTerminator())) {
        BasicBlock *Parent = PN->getParent();
        Loop *PNLoop = LI.getLoopFor(Parent);
        if (!PNLoop || Parent != PNLoop->getHeader()) {
          // Split the critical edge.
          BasicBlock *NewBB = 0;
          if (!Parent->isLandingPad()) {
            NewBB = SplitCriticalEdge(BB, Parent, P);
          } else {
            SmallVector<BasicBlock*, 2> NewBBs;
            SplitLandingPadPredecessors(Parent, BB, "", "", P, NewBBs);
            NewBB = NewBBs[0];
          }

          // If PN is outside of the loop and BB is in the loop, we want to
          // move the block to be immediately before the PHI block, not
          // immediately after BB.
          if (L->contains(BB) && !L->contains(PN))
            NewBB->moveBefore(PN->getParent());

          // Splitting the edge can reduce the number of PHI entries we have.
          e = PN->getNumIncomingValues();
          BB = NewBB;
          i = PN->getBasicBlockIndex(BB);
        }
      }

      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
        Inserted.insert(std::make_pair(BB, static_cast<Value *>(0)));
      if (!Pair.second)
        PN->setIncomingValue(i, Pair.first->second);
      else {
        Value *FullV = Expand(LF, F, BB->getTerminator(), Rewriter, DeadInsts);

        // If this is reuse-by-noop-cast, insert the noop cast.
        Type *OpTy = LF.OperandValToReplace->getType();
        if (FullV->getType() != OpTy)
          FullV =
            CastInst::Create(CastInst::getCastOpcode(FullV, false,
                                                     OpTy, false),
                             FullV, LF.OperandValToReplace->getType(),
                             "tmp", BB->getTerminator());

        PN->setIncomingValue(i, FullV);
        Pair.first->second = FullV;
      }
    }
}

/// Rewrite - Emit instructions for the leading candidate expression for this
/// LSRUse (this is called "expanding"), and update the UserInst to reference
/// the newly expanded value.
void LSRInstance::Rewrite(const LSRFixup &LF,
                          const Formula &F,
                          SCEVExpander &Rewriter,
                          SmallVectorImpl<WeakVH> &DeadInsts,
                          Pass *P) const {
  // First, find an insertion point that dominates UserInst. For PHI nodes,
  // find the nearest block which dominates all the relevant uses.
  if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
    RewriteForPHI(PN, LF, F, Rewriter, DeadInsts, P);
  } else {
    Value *FullV = Expand(LF, F, LF.UserInst, Rewriter, DeadInsts);

    // If this is reuse-by-noop-cast, insert the noop cast.
    Type *OpTy = LF.OperandValToReplace->getType();
    if (FullV->getType() != OpTy) {
      Instruction *Cast =
        CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
                         FullV, OpTy, "tmp", LF.UserInst);
      FullV = Cast;
    }
    // Update the user. ICmpZero is handled specially here (for now) because
    // Expand may have updated one of the operands of the icmp already, and
    // its new value may happen to be equal to LF.OperandValToReplace, in
    // which case doing replaceUsesOfWith leads to replacing both operands
    // with the same value. TODO: Reorganize this.
    if (Uses[LF.LUIdx].Kind == LSRUse::ICmpZero)
      LF.UserInst->setOperand(0, FullV);
    else
      LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
  }

  DeadInsts.push_back(LF.OperandValToReplace);
}

/// ImplementSolution - Rewrite all the fixup locations with new values,
/// following the chosen solution.
void
LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                               Pass *P) {
  // Keep track of instructions we may have made dead, so that
  // we can remove them after we are done working.
  SmallVector<WeakVH, 16> DeadInsts;

  SCEVExpander Rewriter(SE, "lsr");
  Rewriter.disableCanonicalMode();
  Rewriter.setIVIncInsertPos(L, IVIncInsertPos);

  // Expand the new value definitions and update the users.
  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
       E = Fixups.end(); I != E; ++I) {
    const LSRFixup &Fixup = *I;

    Rewrite(Fixup, *Solution[Fixup.LUIdx], Rewriter, DeadInsts, P);

    Changed = true;
  }

  // Clean up after ourselves. This must be done before deleting any
  // instructions.
  Rewriter.clear();

  Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
}

LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
  : IU(P->getAnalysis<IVUsers>()),
    SE(P->getAnalysis<ScalarEvolution>()),
    DT(P->getAnalysis<DominatorTree>()),
    LI(P->getAnalysis<LoopInfo>()),
    TLI(tli), L(l), Changed(false), IVIncInsertPos(0) {

  // If LoopSimplify form is not available, stay out of trouble.
  if (!L->isLoopSimplifyForm()) return;

  // If there's no interesting work to be done, bail early.
  if (IU.empty()) return;

  DEBUG(dbgs() << "\nLSR on loop ";
        WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false);
        dbgs() << ":\n");

  // First, perform some low-level loop optimizations.
  OptimizeShadowIV();
  OptimizeLoopTermCond();

  // If loop preparation eliminates all interesting IV users, bail.
  if (IU.empty()) return;

  // Skip nested loops until we can model them better with formulae.
  if (!EnableNested && !L->empty()) {
    DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n");
    return;
  }

  // Start collecting data and preparing for the solver.
  CollectInterestingTypesAndFactors();
  CollectFixupsAndInitialFormulae();
  CollectLoopInvariantFixupsAndFormulae();

  DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
        print_uses(dbgs()));

  // Now use the reuse data to generate a bunch of interesting ways
  // to formulate the values needed for the uses.
  GenerateAllReuseFormulae();

  FilterOutUndesirableDedicatedRegisters();
  NarrowSearchSpaceUsingHeuristics();

  SmallVector<const Formula *, 8> Solution;
  Solve(Solution);

  // Release memory that is no longer needed.
  Factors.clear();
  Types.clear();
  RegUses.clear();

  if (Solution.empty())
    return;

#ifndef NDEBUG
  // Formulae should be legal.
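  // For example, on a target like x86, a formula whose combined immediate
  // falls outside the 32-bit displacement range for some fixup offset would
  // trip this check.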
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J)
      assert(isLegalUse(J->AM, LU.MinOffset, LU.MaxOffset,
                        LU.Kind, LU.AccessTy, TLI) &&
             "Illegal formula generated!");
  }
#endif

  // Now that we've decided what we want, make it so.
  ImplementSolution(Solution, P);
}

void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
  if (Factors.empty() && Types.empty()) return;

  OS << "LSR has identified the following interesting factors and types: ";
  bool First = true;

  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '*' << *I;
  }

  for (SmallSetVector<Type *, 4>::const_iterator
       I = Types.begin(), E = Types.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '(' << **I << ')';
  }
  OS << '\n';
}

void LSRInstance::print_fixups(raw_ostream &OS) const {
  OS << "LSR is examining the following fixup sites:\n";
  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
       E = Fixups.end(); I != E; ++I) {
    OS << "  ";
    I->print(OS);
    OS << '\n';
  }
}

void LSRInstance::print_uses(raw_ostream &OS) const {
  OS << "LSR is examining the following uses:\n";
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    OS << "  ";
    LU.print(OS);
    OS << '\n';
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J) {
      OS << "    ";
      J->print(OS);
      OS << '\n';
    }
  }
}

void LSRInstance::print(raw_ostream &OS) const {
  print_factors_and_types(OS);
  print_fixups(OS);
  print_uses(OS);
}

void LSRInstance::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

class LoopStrengthReduce : public LoopPass {
  /// TLI - Keep a pointer to a TargetLowering to consult for determining
  /// transformation profitability.
  const TargetLowering *const TLI;

public:
  static char ID; // Pass ID, replacement for typeid
  explicit LoopStrengthReduce(const TargetLowering *tli = 0);

private:
  bool runOnLoop(Loop *L, LPPassManager &LPM);
  void getAnalysisUsage(AnalysisUsage &AU) const;
};

}

char LoopStrengthReduce::ID = 0;
INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
                "Loop Strength Reduction", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(IVUsers)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
                "Loop Strength Reduction", false, false)


Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}

LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli)
  : LoopPass(ID), TLI(tli) {
  initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry());
}

void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
  // We split critical edges, so we change the CFG. However, we do update
  // many analyses if they are around.
  AU.addPreservedID(LoopSimplifyID);

  AU.addRequired<LoopInfo>();
  AU.addPreserved<LoopInfo>();
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<DominatorTree>();
  AU.addPreserved<DominatorTree>();
  AU.addRequired<ScalarEvolution>();
  AU.addPreserved<ScalarEvolution>();
  // Requiring LoopSimplify a second time here prevents IVUsers from running
  // twice, since LoopSimplify was invalidated by running ScalarEvolution.
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<IVUsers>();
  AU.addPreserved<IVUsers>();
}

bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
  bool Changed = false;

  // Run the main LSR transformation.
  Changed |= LSRInstance(TLI, L, this).getChanged();

  // At this point, it is worth checking to see if any recurrence PHIs are also
  // dead, so that we can remove them as well.
  Changed |= DeleteDeadPHIs(L->getHeader());

  return Changed;
}
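
// A minimal usage sketch (hypothetical pipeline; the TargetLowering argument
// may be null for a target-independent run):
//
//   PassManager PM;
//   PM.add(createLoopStrengthReducePass(/*TLI=*/0));
//   PM.run(M);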