//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops
// that have as one or more of their components the loop induction variable.
// It rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
// it's useful to think about these as the same register, with some uses using
// the value of the register before the add and some using it after. In this
// example, the icmp is a post-increment user, since it uses %i.next, which is
// the value of the induction variable after the increment. The other common
// case of post-increment users is users outside the loop.
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should TargetLowering::AddrMode::BaseGV be changed to a ConstantExpr
//       instead of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
//       smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
//       multiple base registers, or as the increment expression in an addrec),
//       we may not actually need both reg and (-1 * reg) in registers; the
//       negation can be implemented by using a sub instead of an add. The
//       lack of support for taking this into consideration when making
//       register pressure decisions is partly worked around by the "Special"
//       use kind.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

namespace {

/// RegSortData - This class holds data which is used to order reuse
/// candidates.
class RegSortData {
public:
  /// UsedByIndices - This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  RegSortData() {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// RegUseTracker - Map register candidates to information about how they are
/// used.
class RegUseTracker {
  typedef DenseMap<const SCEV *, RegSortData> RegUsesTy;

  RegUsesTy RegUsesMap;
  SmallVector<const SCEV *, 16> RegSequence;

public:
  void CountRegister(const SCEV *Reg, size_t LUIdx);
  void DropRegister(const SCEV *Reg, size_t LUIdx);
  void DropUse(size_t LUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;

  void clear();

  typedef SmallVectorImpl<const SCEV *>::iterator iterator;
  typedef SmallVectorImpl<const SCEV *>::const_iterator const_iterator;
  iterator begin() { return RegSequence.begin(); }
  iterator end()   { return RegSequence.end(); }
  const_iterator begin() const { return RegSequence.begin(); }
  const_iterator end() const   { return RegSequence.end(); }
};

}

void
RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair =
    RegUsesMap.insert(std::make_pair(Reg, RegSortData()));
  RegSortData &RSD = Pair.first->second;
  if (Pair.second)
    RegSequence.push_back(Reg);
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);
}

void
RegUseTracker::DropRegister(const SCEV *Reg, size_t LUIdx) {
  RegUsesTy::iterator It = RegUsesMap.find(Reg);
  assert(It != RegUsesMap.end());
  RegSortData &RSD = It->second;
  assert(RSD.UsedByIndices.size() > LUIdx);
  RSD.UsedByIndices.reset(LUIdx);
}

void
RegUseTracker::DropUse(size_t LUIdx) {
  // Remove the use index from every register's use list.
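  // (The index itself is not recycled here; a cleared bit simply means the
  // register is no longer referenced by that use.)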
  for (RegUsesTy::iterator I = RegUsesMap.begin(), E = RegUsesMap.end();
       I != E; ++I)
    I->second.UsedByIndices.reset(LUIdx);
}

bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  if (I == RegUsesMap.end())
    return false;
  const SmallBitVector &UsedByIndices = I->second.UsedByIndices;
  int i = UsedByIndices.find_first();
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;
  return UsedByIndices.find_next(i) != -1;
}

const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  assert(I != RegUsesMap.end() && "Unknown register!");
  return I->second.UsedByIndices;
}

void RegUseTracker::clear() {
  RegUsesMap.clear();
  RegSequence.clear();
}

namespace {

/// Formula - This class holds information that describes a formula for
/// computing a value satisfying a use. It may include broken-out immediates
/// and scaled registers.
struct Formula {
  /// AM - This is used to represent complex addressing, as well as other kinds
  /// of interesting uses.
  TargetLowering::AddrMode AM;

  /// BaseRegs - The list of "base" registers for this use. When this is
  /// non-empty, AM.HasBaseReg should be set to true.
  SmallVector<const SCEV *, 2> BaseRegs;

  /// ScaledReg - The 'scaled' register for this use. This should be non-null
  /// when AM.Scale is not zero.
  const SCEV *ScaledReg;

  Formula() : ScaledReg(0) {}

  void InitialMatch(const SCEV *S, Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);

  unsigned getNumRegs() const;
  const Type *getType() const;

  void DeleteBaseReg(const SCEV *&S);

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// DoInitialMatch - Recursion helper for InitialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE, DominatorTree &DT) {
  // Collect expressions which properly dominate the loop header.
  if (S->properlyDominates(L->getHeader(), &DT)) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      DoInitialMatch(*I, L, Good, Bad, SE, DT);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE, DT);
      DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                      AR->getStepRecurrence(SE),
                                      AR->getLoop()),
                     L, Good, Bad, SE, DT);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
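  // For example (assuming %a is loop-invariant), a non-folded
  // (-1 * (%a + {0,+,4}<%L>)) is matched by recursing on (%a + {0,+,4}<%L>)
  // and then re-applying the negation to each resulting piece, so %a still
  // lands in Good as (-1 * %a).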
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getOperand(0)->isAllOnesValue()) {
      SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end());
      const SCEV *NewMul = SE.getMulExpr(Ops);

      SmallVector<const SCEV *, 4> MyGood;
      SmallVector<const SCEV *, 4> MyBad;
      DoInitialMatch(NewMul, L, MyGood, MyBad, SE, DT);
      const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
        SE.getEffectiveSCEVType(NewMul->getType())));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyGood.begin(),
           E = MyGood.end(); I != E; ++I)
        Good.push_back(SE.getMulExpr(NegOne, *I));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyBad.begin(),
           E = MyBad.end(); I != E; ++I)
        Bad.push_back(SE.getMulExpr(NegOne, *I));
      return;
    }

  // Ok, we can't do anything interesting. Just stuff the whole thing into a
  // register and hope for the best.
  Bad.push_back(S);
}

/// InitialMatch - Incorporate loop-variant parts of S into this Formula,
/// attempting to keep all loop-invariant and loop-computable values in a
/// single base register.
void Formula::InitialMatch(const SCEV *S, Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT) {
  SmallVector<const SCEV *, 4> Good;
  SmallVector<const SCEV *, 4> Bad;
  DoInitialMatch(S, L, Good, Bad, SE, DT);
  if (!Good.empty()) {
    const SCEV *Sum = SE.getAddExpr(Good);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    AM.HasBaseReg = true;
  }
  if (!Bad.empty()) {
    const SCEV *Sum = SE.getAddExpr(Bad);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    AM.HasBaseReg = true;
  }
}

/// getNumRegs - Return the total number of register operands used by this
/// formula. This does not include register uses implied by non-constant
/// addrec strides.
unsigned Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}

/// getType - Return the type of this formula, if it has one, or null
/// otherwise. This type is meaningless except for the bit size.
const Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         AM.BaseGV ? AM.BaseGV->getType() :
         0;
}

/// DeleteBaseReg - Delete the given base reg from the BaseRegs list.
void Formula::DeleteBaseReg(const SCEV *&S) {
  if (&S != &BaseRegs.back())
    std::swap(S, BaseRegs.back());
  BaseRegs.pop_back();
}

/// referencesReg - Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg ||
         std::find(BaseRegs.begin(), BaseRegs.end(), S) != BaseRegs.end();
}

/// hasRegsUsedByUsesOtherThan - Test whether this formula uses registers
/// which are used by uses other than the use with the given index.
bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
  if (ScaledReg)
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
      return true;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I)
    if (RegUses.isRegUsedByUsesOtherThan(*I, LUIdx))
      return true;
  return false;
}

void Formula::print(raw_ostream &OS) const {
  bool First = true;
  if (AM.BaseGV) {
    if (!First) OS << " + "; else First = false;
    WriteAsOperand(OS, AM.BaseGV, /*PrintType=*/false);
  }
  if (AM.BaseOffs != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.BaseOffs;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I) {
    if (!First) OS << " + "; else First = false;
    OS << "reg(" << **I << ')';
  }
  if (AM.HasBaseReg && BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: HasBaseReg**";
  } else if (!AM.HasBaseReg && !BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: !HasBaseReg**";
  }
  if (AM.Scale != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.Scale << "*reg(";
    if (ScaledReg)
      OS << *ScaledReg;
    else
      OS << "<unknown>";
    OS << ')';
  }
}

void Formula::dump() const {
  print(errs()); errs() << '\n';
}

/// isAddRecSExtable - Return true if the given addrec can be sign-extended
/// without changing its value.
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  const Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1);
  return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}

/// isAddSExtable - Return true if the given add can be sign-extended
/// without changing its value.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
  const Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1);
  return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}

/// isMulSExtable - Return true if the given mul can be sign-extended
/// without changing its value.
static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
  const Type *WideTy =
    IntegerType::get(SE.getContext(),
                     SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
  return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
}

/// getExactSDiv - Return an expression for LHS /s RHS, if it can be determined
/// and if the remainder is known to be zero, or null otherwise. If
/// IgnoreSignificantBits is true, expressions like (X * Y) /s Y are simplified
/// to X, ignoring that the multiplication may overflow, which is useful when
/// the result will be used in a context where the most significant bits are
/// ignored.
static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
                                ScalarEvolution &SE,
                                bool IgnoreSignificantBits = false) {
  // Handle the trivial case, which works for any SCEV type.
  if (LHS == RHS)
    return SE.getConstant(LHS->getType(), 1);

  // Handle a few RHS special cases.
  const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
  if (RC) {
    const APInt &RA = RC->getValue()->getValue();
    // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
    // some folding.
    if (RA.isAllOnesValue())
      return SE.getMulExpr(LHS, RC);
    // Handle x /s 1 as x.
    if (RA == 1)
      return LHS;
  }

  // Check for a division of a constant by a constant.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
    if (!RC)
      return 0;
    const APInt &LA = C->getValue()->getValue();
    const APInt &RA = RC->getValue()->getValue();
    if (LA.srem(RA) != 0)
      return 0;
    return SE.getConstant(LA.sdiv(RA));
  }

  // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) {
      const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
                                      IgnoreSignificantBits);
      if (!Step) return 0;
      const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
                                       IgnoreSignificantBits);
      if (!Start) return 0;
      return SE.getAddRecExpr(Start, Step, AR->getLoop());
    }
    return 0;
  }

  // Distribute the sdiv over add operands, if the add doesn't overflow.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
      SmallVector<const SCEV *, 8> Ops;
      for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
           I != E; ++I) {
        const SCEV *Op = getExactSDiv(*I, RHS, SE,
                                      IgnoreSignificantBits);
        if (!Op) return 0;
        Ops.push_back(Op);
      }
      return SE.getAddExpr(Ops);
    }
    return 0;
  }

  // Check for a multiply operand that we can pull RHS out of.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) {
    if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
      SmallVector<const SCEV *, 4> Ops;
      bool Found = false;
      for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end();
           I != E; ++I) {
        const SCEV *S = *I;
        if (!Found)
          if (const SCEV *Q = getExactSDiv(S, RHS, SE,
                                           IgnoreSignificantBits)) {
            S = Q;
            Found = true;
          }
        Ops.push_back(S);
      }
      return Found ? SE.getMulExpr(Ops) : 0;
    }
    return 0;
  }

  // Otherwise we don't know.
  return 0;
}

/// ExtractImmediate - If S involves the addition of a constant integer value,
/// return that integer value, and mutate S to point to a new SCEV with that
/// value excluded.
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    if (C->getValue()->getValue().getMinSignedBits() <= 64) {
      S = SE.getConstant(C->getType(), 0);
      return C->getValue()->getSExtValue();
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddRecExpr(NewOps, AR->getLoop());
    return Result;
  }
  return 0;
}

/// ExtractSymbol - If S involves the addition of a GlobalValue address,
/// return that symbol, and mutate S to point to a new SCEV with that
/// value excluded.
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
      S = SE.getConstant(GV->getType(), 0);
      return GV;
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
    if (Result)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
    if (Result)
      S = SE.getAddRecExpr(NewOps, AR->getLoop());
    return Result;
  }
  return 0;
}

/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::prefetch:
    case Intrinsic::x86_sse2_loadu_dq:
    case Intrinsic::x86_sse2_loadu_pd:
    case Intrinsic::x86_sse_loadu_ps:
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      if (II->getArgOperand(0) == OperandVal)
        isAddress = true;
      break;
    }
  }
  return isAddress;
}

/// getAccessType - Return the type of the memory being accessed.
static const Type *getAccessType(const Instruction *Inst) {
  const Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getArgOperand(0)->getType();
      break;
    }
  }

  // All pointers have the same requirements, so canonicalize them to an
  // arbitrary pointer type to minimize variation.
  if (const PointerType *PTy = dyn_cast<PointerType>(AccessTy))
    AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
                                PTy->getAddressSpace());

  return AccessTy;
}

/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
static bool
DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
  bool Changed = false;

  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val());

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
    Changed = true;
  }

  return Changed;
}

namespace {

/// Cost - This class is used to measure and compare candidate formulae.
class Cost {
  /// TODO: Some of these could be merged. Also, a lexical ordering
  /// isn't always optimal.
  unsigned NumRegs;
  unsigned AddRecCost;
  unsigned NumIVMuls;
  unsigned NumBaseAdds;
  unsigned ImmCost;
  unsigned SetupCost;

public:
  Cost()
    : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0),
      SetupCost(0) {}

  unsigned getNumRegs() const { return NumRegs; }

  bool operator<(const Cost &Other) const;

  void Loose();

  void RateFormula(const Formula &F,
                   SmallPtrSet<const SCEV *, 16> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs,
                   const Loop *L,
                   const SmallVectorImpl<int64_t> &Offsets,
                   ScalarEvolution &SE, DominatorTree &DT);

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  void RateRegister(const SCEV *Reg,
                    SmallPtrSet<const SCEV *, 16> &Regs,
                    const Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);
  void RatePrimaryRegister(const SCEV *Reg,
                           SmallPtrSet<const SCEV *, 16> &Regs,
                           const Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT);
};

}

/// RateRegister - Tally up interesting quantities from the given register.
void Cost::RateRegister(const SCEV *Reg,
                        SmallPtrSet<const SCEV *, 16> &Regs,
                        const Loop *L,
                        ScalarEvolution &SE, DominatorTree &DT) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    if (AR->getLoop() == L)
      AddRecCost += 1; /// TODO: This should be a function of the stride.

    // If this is an addrec for a loop that's already been visited by LSR,
    // don't second-guess its addrec phi nodes. LSR isn't currently smart
    // enough to reason about more than one loop at a time. Consider these
    // registers free and leave them alone.
    else if (L->contains(AR->getLoop()) ||
             (!AR->getLoop()->contains(L) &&
              DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))) {
      for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin();
           PHINode *PN = dyn_cast<PHINode>(I); ++I)
        if (SE.isSCEVable(PN->getType()) &&
            (SE.getEffectiveSCEVType(PN->getType()) ==
             SE.getEffectiveSCEVType(AR->getType())) &&
            SE.getSCEV(PN) == AR)
          return;

      // If this isn't one of the addrecs that the loop already has, it
      // would require a costly new phi and add. TODO: This isn't
      // precisely modeled right now.
      ++NumBaseAdds;
      if (!Regs.count(AR->getStart()))
        RateRegister(AR->getStart(), Regs, L, SE, DT);
    }

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
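    // (A non-constant stride, e.g. {0,+,%n}<%L>, means the step expression
    // itself must be kept live in a register, so it is rated here too.)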
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1)))
      if (!Regs.count(AR->getOperand(1)))
        RateRegister(AR->getOperand(1), Regs, L, SE, DT);
  }
  ++NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
  if (!isa<SCEVUnknown>(Reg) &&
      !isa<SCEVConstant>(Reg) &&
      !(isa<SCEVAddRecExpr>(Reg) &&
        (isa<SCEVUnknown>(cast<SCEVAddRecExpr>(Reg)->getStart()) ||
         isa<SCEVConstant>(cast<SCEVAddRecExpr>(Reg)->getStart()))))
    ++SetupCost;
}

/// RatePrimaryRegister - Record this register in the set. If we haven't seen
/// it before, rate it.
void Cost::RatePrimaryRegister(const SCEV *Reg,
                               SmallPtrSet<const SCEV *, 16> &Regs,
                               const Loop *L,
                               ScalarEvolution &SE, DominatorTree &DT) {
  if (Regs.insert(Reg))
    RateRegister(Reg, Regs, L, SE, DT);
}

void Cost::RateFormula(const Formula &F,
                       SmallPtrSet<const SCEV *, 16> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const Loop *L,
                       const SmallVectorImpl<int64_t> &Offsets,
                       ScalarEvolution &SE, DominatorTree &DT) {
  // Tally up the registers.
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(ScaledReg, Regs, L, SE, DT);
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (VisitedRegs.count(BaseReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(BaseReg, Regs, L, SE, DT);

    NumIVMuls += isa<SCEVMulExpr>(BaseReg) &&
                 BaseReg->hasComputableLoopEvolution(L);
  }

  if (F.BaseRegs.size() > 1)
    NumBaseAdds += F.BaseRegs.size() - 1;

  // Tally up the non-zero immediates.
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    int64_t Offset = (uint64_t)*I + F.AM.BaseOffs;
    if (F.AM.BaseGV)
      ImmCost += 64; // Handle symbolic values conservatively.
                     // TODO: This should probably be the pointer size.
    else if (Offset != 0)
      ImmCost += APInt(64, Offset, true).getMinSignedBits();
  }
}

/// Loose - Set this cost to a losing value.
void Cost::Loose() {
  NumRegs = ~0u;
  AddRecCost = ~0u;
  NumIVMuls = ~0u;
  NumBaseAdds = ~0u;
  ImmCost = ~0u;
  SetupCost = ~0u;
}

/// operator< - Choose the lower cost.
bool Cost::operator<(const Cost &Other) const {
  if (NumRegs != Other.NumRegs)
    return NumRegs < Other.NumRegs;
  if (AddRecCost != Other.AddRecCost)
    return AddRecCost < Other.AddRecCost;
  if (NumIVMuls != Other.NumIVMuls)
    return NumIVMuls < Other.NumIVMuls;
  if (NumBaseAdds != Other.NumBaseAdds)
    return NumBaseAdds < Other.NumBaseAdds;
  if (ImmCost != Other.ImmCost)
    return ImmCost < Other.ImmCost;
  if (SetupCost != Other.SetupCost)
    return SetupCost < Other.SetupCost;
  return false;
}

void Cost::print(raw_ostream &OS) const {
  OS << NumRegs << " reg" << (NumRegs == 1 ? "" : "s");
  if (AddRecCost != 0)
    OS << ", with addrec cost " << AddRecCost;
  if (NumIVMuls != 0)
    OS << ", plus " << NumIVMuls << " IV mul" << (NumIVMuls == 1 ? "" : "s");
  if (NumBaseAdds != 0)
    OS << ", plus " << NumBaseAdds << " base add"
       << (NumBaseAdds == 1 ?
"" : "s"); 815 if (ImmCost != 0) 816 OS << ", plus " << ImmCost << " imm cost"; 817 if (SetupCost != 0) 818 OS << ", plus " << SetupCost << " setup cost"; 819 } 820 821 void Cost::dump() const { 822 print(errs()); errs() << '\n'; 823 } 824 825 namespace { 826 827 /// LSRFixup - An operand value in an instruction which is to be replaced 828 /// with some equivalent, possibly strength-reduced, replacement. 829 struct LSRFixup { 830 /// UserInst - The instruction which will be updated. 831 Instruction *UserInst; 832 833 /// OperandValToReplace - The operand of the instruction which will 834 /// be replaced. The operand may be used more than once; every instance 835 /// will be replaced. 836 Value *OperandValToReplace; 837 838 /// PostIncLoops - If this user is to use the post-incremented value of an 839 /// induction variable, this variable is non-null and holds the loop 840 /// associated with the induction variable. 841 PostIncLoopSet PostIncLoops; 842 843 /// LUIdx - The index of the LSRUse describing the expression which 844 /// this fixup needs, minus an offset (below). 845 size_t LUIdx; 846 847 /// Offset - A constant offset to be added to the LSRUse expression. 848 /// This allows multiple fixups to share the same LSRUse with different 849 /// offsets, for example in an unrolled loop. 850 int64_t Offset; 851 852 bool isUseFullyOutsideLoop(const Loop *L) const; 853 854 LSRFixup(); 855 856 void print(raw_ostream &OS) const; 857 void dump() const; 858 }; 859 860 } 861 862 LSRFixup::LSRFixup() 863 : UserInst(0), OperandValToReplace(0), LUIdx(~size_t(0)), Offset(0) {} 864 865 /// isUseFullyOutsideLoop - Test whether this fixup always uses its 866 /// value outside of the given loop. 867 bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const { 868 // PHI nodes use their value in their incoming blocks. 869 if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) { 870 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 871 if (PN->getIncomingValue(i) == OperandValToReplace && 872 L->contains(PN->getIncomingBlock(i))) 873 return false; 874 return true; 875 } 876 877 return !L->contains(UserInst); 878 } 879 880 void LSRFixup::print(raw_ostream &OS) const { 881 OS << "UserInst="; 882 // Store is common and interesting enough to be worth special-casing. 883 if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) { 884 OS << "store "; 885 WriteAsOperand(OS, Store->getOperand(0), /*PrintType=*/false); 886 } else if (UserInst->getType()->isVoidTy()) 887 OS << UserInst->getOpcodeName(); 888 else 889 WriteAsOperand(OS, UserInst, /*PrintType=*/false); 890 891 OS << ", OperandValToReplace="; 892 WriteAsOperand(OS, OperandValToReplace, /*PrintType=*/false); 893 894 for (PostIncLoopSet::const_iterator I = PostIncLoops.begin(), 895 E = PostIncLoops.end(); I != E; ++I) { 896 OS << ", PostIncLoop="; 897 WriteAsOperand(OS, (*I)->getHeader(), /*PrintType=*/false); 898 } 899 900 if (LUIdx != ~size_t(0)) 901 OS << ", LUIdx=" << LUIdx; 902 903 if (Offset != 0) 904 OS << ", Offset=" << Offset; 905 } 906 907 void LSRFixup::dump() const { 908 print(errs()); errs() << '\n'; 909 } 910 911 namespace { 912 913 /// UniquifierDenseMapInfo - A DenseMapInfo implementation for holding 914 /// DenseMaps and DenseSets of sorted SmallVectors of const SCEV*. 
struct UniquifierDenseMapInfo {
  static SmallVector<const SCEV *, 2> getEmptyKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-1));
    return V;
  }

  static SmallVector<const SCEV *, 2> getTombstoneKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-2));
    return V;
  }

  static unsigned getHashValue(const SmallVector<const SCEV *, 2> &V) {
    unsigned Result = 0;
    for (SmallVectorImpl<const SCEV *>::const_iterator I = V.begin(),
         E = V.end(); I != E; ++I)
      Result ^= DenseMapInfo<const SCEV *>::getHashValue(*I);
    return Result;
  }

  static bool isEqual(const SmallVector<const SCEV *, 2> &LHS,
                      const SmallVector<const SCEV *, 2> &RHS) {
    return LHS == RHS;
  }
};

/// LSRUse - This class holds the state that LSR keeps for each use in
/// IVUsers, as well as uses invented by LSR itself. It includes information
/// about what kinds of things can be folded into the user, information about
/// the user itself, and information about how the use may be satisfied.
/// TODO: Represent multiple users of the same expression in common?
class LSRUse {
  DenseSet<SmallVector<const SCEV *, 2>, UniquifierDenseMapInfo> Uniquifier;

public:
  /// KindType - An enum for a kind of use, indicating what types of
  /// scaled and immediate operands it might support.
  enum KindType {
    Basic,   ///< A normal use, with no folding.
    Special, ///< A special case of basic, allowing -1 scales.
    Address, ///< An address use; folding according to TargetLowering.
    ICmpZero ///< An equality icmp with both operands folded into one.
    // TODO: Add a generic icmp too?
  };

  KindType Kind;
  const Type *AccessTy;

  SmallVector<int64_t, 8> Offsets;
  int64_t MinOffset;
  int64_t MaxOffset;

  /// AllFixupsOutsideLoop - This records whether all of the fixups using this
  /// LSRUse are outside of the loop, in which case some special-case
  /// heuristics may be used.
  bool AllFixupsOutsideLoop;

  /// WidestFixupType - This records the widest use type for any fixup using
  /// this LSRUse. FindUseWithSimilarFormula can't consider uses with different
  /// max fixup widths to be equivalent, because the narrower one may be
  /// relying on the implicit truncation to truncate away bogus bits.
  const Type *WidestFixupType;

  /// Formulae - A list of ways to build a value that can satisfy this user.
  /// After the list is populated, one of these is selected heuristically and
  /// used to formulate a replacement for OperandValToReplace in UserInst.
  SmallVector<Formula, 12> Formulae;

  /// Regs - The set of register candidates used by all formulae in this
  /// LSRUse.
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, const Type *T) : Kind(K), AccessTy(T),
                                      MinOffset(INT64_MAX),
                                      MaxOffset(INT64_MIN),
                                      AllFixupsOutsideLoop(true),
                                      WidestFixupType(0) {}

  bool HasFormulaWithSameRegs(const Formula &F) const;
  bool InsertFormula(const Formula &F);
  void DeleteFormula(Formula &F);
  void RecomputeRegs(size_t LUIdx, RegUseTracker &Reguses);

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// HasFormulaWithSameRegs - Test whether this use has a formula which has the
/// same registers as the given formula.
bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());
  return Uniquifier.count(Key);
}

/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRUse::InsertFormula(const Formula &F) {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());

  if (!Uniquifier.insert(Key).second)
    return false;

  // Using a register to hold the value of 0 is not profitable.
  assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
         "Zero allocated in a scaled register!");
#ifndef NDEBUG
  for (SmallVectorImpl<const SCEV *>::const_iterator I =
       F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I)
    assert(!(*I)->isZero() && "Zero allocated in a base register!");
#endif

  // Add the formula to the list.
  Formulae.push_back(F);

  // Record registers now being used by this use.
  if (F.ScaledReg) Regs.insert(F.ScaledReg);
  Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());

  return true;
}

/// DeleteFormula - Remove the given formula from this use's list.
void LSRUse::DeleteFormula(Formula &F) {
  if (&F != &Formulae.back())
    std::swap(F, Formulae.back());
  Formulae.pop_back();
  assert(!Formulae.empty() && "LSRUse has no formulae left!");
}

/// RecomputeRegs - Recompute the Regs field, and update RegUses.
void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
  // Now that we've filtered out some formulae, recompute the Regs set.
  SmallPtrSet<const SCEV *, 4> OldRegs = Regs;
  Regs.clear();
  for (SmallVectorImpl<Formula>::const_iterator I = Formulae.begin(),
       E = Formulae.end(); I != E; ++I) {
    const Formula &F = *I;
    if (F.ScaledReg) Regs.insert(F.ScaledReg);
    Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
  }

  // Update the RegTracker.
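  // Any register that was referenced only by deleted formulae is no longer
  // used by this use; drop this use's index from its use list.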
  for (SmallPtrSet<const SCEV *, 4>::iterator I = OldRegs.begin(),
       E = OldRegs.end(); I != E; ++I)
    if (!Regs.count(*I))
      RegUses.DropRegister(*I, LUIdx);
}

void LSRUse::print(raw_ostream &OS) const {
  OS << "LSR Use: Kind=";
  switch (Kind) {
  case Basic:    OS << "Basic"; break;
  case Special:  OS << "Special"; break;
  case ICmpZero: OS << "ICmpZero"; break;
  case Address:
    OS << "Address of ";
    if (AccessTy->isPointerTy())
      OS << "pointer"; // the full pointer type could be really verbose
    else
      OS << *AccessTy;
  }

  OS << ", Offsets={";
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    OS << *I;
    if (llvm::next(I) != E)
      OS << ',';
  }
  OS << '}';

  if (AllFixupsOutsideLoop)
    OS << ", all-fixups-outside-loop";

  if (WidestFixupType)
    OS << ", widest fixup type: " << *WidestFixupType;
}

void LSRUse::dump() const {
  print(errs()); errs() << '\n';
}

/// isLegalUse - Test whether the use described by AM is "legal", meaning it
/// can be completely folded into the user instruction at isel time. This
/// includes address-mode folding and special icmp tricks.
static bool isLegalUse(const TargetLowering::AddrMode &AM,
                       LSRUse::KindType Kind, const Type *AccessTy,
                       const TargetLowering *TLI) {
  switch (Kind) {
  case LSRUse::Address:
    // If we have low-level target information, ask the target if it can
    // completely fold this address.
    if (TLI) return TLI->isLegalAddressingMode(AM, AccessTy);

    // Otherwise, just guess that reg+reg addressing is legal.
    return !AM.BaseGV && AM.BaseOffs == 0 && AM.Scale <= 1;

  case LSRUse::ICmpZero:
    // There's not even a target hook for querying whether it would be legal to
    // fold a GV into an ICmp.
    if (AM.BaseGV)
      return false;

    // ICmp only has two operands; don't allow more than two non-trivial parts.
    if (AM.Scale != 0 && AM.HasBaseReg && AM.BaseOffs != 0)
      return false;

    // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale
    // by putting the scaled register in the other operand of the icmp.
    if (AM.Scale != 0 && AM.Scale != -1)
      return false;

    // If we have low-level target information, ask the target if it can fold
    // an integer immediate on an icmp.
    if (AM.BaseOffs != 0) {
      if (TLI) return TLI->isLegalICmpImmediate(-AM.BaseOffs);
      return false;
    }

    return true;

  case LSRUse::Basic:
    // Only handle single-register values.
    return !AM.BaseGV && AM.Scale == 0 && AM.BaseOffs == 0;

  case LSRUse::Special:
    // Only handle -1 scales, or no scale.
    return AM.Scale == 0 || AM.Scale == -1;
  }

  return false;
}

static bool isLegalUse(TargetLowering::AddrMode AM,
                       int64_t MinOffset, int64_t MaxOffset,
                       LSRUse::KindType Kind, const Type *AccessTy,
                       const TargetLowering *TLI) {
  // Check for overflow.
  if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) !=
      (MinOffset > 0))
    return false;
  AM.BaseOffs = (uint64_t)AM.BaseOffs + MinOffset;
  if (isLegalUse(AM, Kind, AccessTy, TLI)) {
    AM.BaseOffs = (uint64_t)AM.BaseOffs - MinOffset;
    // Check for overflow.
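    // (Same check as above, this time for the MaxOffset end of the range.)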
    if (((int64_t)((uint64_t)AM.BaseOffs + MaxOffset) > AM.BaseOffs) !=
        (MaxOffset > 0))
      return false;
    AM.BaseOffs = (uint64_t)AM.BaseOffs + MaxOffset;
    return isLegalUse(AM, Kind, AccessTy, TLI);
  }
  return false;
}

static bool isAlwaysFoldable(int64_t BaseOffs,
                             GlobalValue *BaseGV,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, const Type *AccessTy,
                             const TargetLowering *TLI) {
  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  // Canonicalize a scale of 1 to a base register if the formula doesn't
  // already have a base register.
  if (!AM.HasBaseReg && AM.Scale == 1) {
    AM.Scale = 0;
    AM.HasBaseReg = true;
  }

  return isLegalUse(AM, Kind, AccessTy, TLI);
}

static bool isAlwaysFoldable(const SCEV *S,
                             int64_t MinOffset, int64_t MaxOffset,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, const Type *AccessTy,
                             const TargetLowering *TLI,
                             ScalarEvolution &SE) {
  // Fast-path: zero is always foldable.
  if (S->isZero()) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t BaseOffs = ExtractImmediate(S, SE);
  GlobalValue *BaseGV = ExtractSymbol(S, SE);

  // If there's anything else involved, it's not foldable.
  if (!S->isZero()) return false;

  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI);
}

namespace {

/// UseMapDenseMapInfo - A DenseMapInfo implementation for holding
/// DenseMaps and DenseSets of pairs of const SCEV* and LSRUse::Kind.
struct UseMapDenseMapInfo {
  static std::pair<const SCEV *, LSRUse::KindType> getEmptyKey() {
    return std::make_pair(reinterpret_cast<const SCEV *>(-1), LSRUse::Basic);
  }

  static std::pair<const SCEV *, LSRUse::KindType> getTombstoneKey() {
    return std::make_pair(reinterpret_cast<const SCEV *>(-2), LSRUse::Basic);
  }

  static unsigned
  getHashValue(const std::pair<const SCEV *, LSRUse::KindType> &V) {
    unsigned Result = DenseMapInfo<const SCEV *>::getHashValue(V.first);
    Result ^= DenseMapInfo<unsigned>::getHashValue(unsigned(V.second));
    return Result;
  }

  static bool isEqual(const std::pair<const SCEV *, LSRUse::KindType> &LHS,
                      const std::pair<const SCEV *, LSRUse::KindType> &RHS) {
    return LHS == RHS;
  }
};

/// FormulaSorter - This class implements an ordering for formulae which sorts
/// them by their standalone cost.
class FormulaSorter {
  /// These two sets are kept empty, so that we compute standalone costs.
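  /// (Regs is cleared again after each RateFormula call in operator() below.)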
  DenseSet<const SCEV *> VisitedRegs;
  SmallPtrSet<const SCEV *, 16> Regs;
  Loop *L;
  LSRUse *LU;
  ScalarEvolution &SE;
  DominatorTree &DT;

public:
  FormulaSorter(Loop *l, LSRUse &lu, ScalarEvolution &se, DominatorTree &dt)
    : L(l), LU(&lu), SE(se), DT(dt) {}

  bool operator()(const Formula &A, const Formula &B) {
    Cost CostA;
    CostA.RateFormula(A, Regs, VisitedRegs, L, LU->Offsets, SE, DT);
    Regs.clear();
    Cost CostB;
    CostB.RateFormula(B, Regs, VisitedRegs, L, LU->Offsets, SE, DT);
    Regs.clear();
    return CostA < CostB;
  }
};

/// LSRInstance - This class holds state for the main loop strength reduction
/// logic.
class LSRInstance {
  IVUsers &IU;
  ScalarEvolution &SE;
  DominatorTree &DT;
  LoopInfo &LI;
  const TargetLowering *const TLI;
  Loop *const L;
  bool Changed;

  /// IVIncInsertPos - This is the insert position at which the current loop's
  /// induction variable increment should be placed. In simple loops, this is
  /// the latch block's terminator. But in more complicated cases, this is a
  /// position which will dominate all the in-loop post-increment users.
  Instruction *IVIncInsertPos;

  /// Factors - Interesting factors between use strides.
  SmallSetVector<int64_t, 8> Factors;

  /// Types - Interesting use types, to facilitate truncation reuse.
  SmallSetVector<const Type *, 4> Types;

  /// Fixups - The list of operands which are to be replaced.
  SmallVector<LSRFixup, 16> Fixups;

  /// Uses - The list of interesting uses.
  SmallVector<LSRUse, 16> Uses;

  /// RegUses - Track which uses use which register candidates.
  RegUseTracker RegUses;

  void OptimizeShadowIV();
  bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
  ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
  void OptimizeLoopTermCond();

  void CollectInterestingTypesAndFactors();
  void CollectFixupsAndInitialFormulae();

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());
    return Fixups.back();
  }

  // Support for sharing of LSRUses between LSRFixups.
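  // The map is keyed on the fixup's expression and use kind, so fixups whose
  // expressions differ only by a constant offset (recorded per-fixup in
  // LSRFixup::Offset) can share a single LSRUse.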
  typedef DenseMap<std::pair<const SCEV *, LSRUse::KindType>,
                   size_t,
                   UseMapDenseMapInfo> UseMapTy;
  UseMapTy UseMap;

  bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
                          LSRUse::KindType Kind, const Type *AccessTy);

  std::pair<size_t, int64_t> getUse(const SCEV *&Expr,
                                    LSRUse::KindType Kind,
                                    const Type *AccessTy);

  void DeleteUse(LSRUse &LU);

  LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);

public:
  void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void CountRegisters(const Formula &F, size_t LUIdx);
  bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);

  void CollectLoopInvariantFixupsAndFormulae();

  void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
                              unsigned Depth = 0);
  void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateCrossUseConstantOffsets();
  void GenerateAllReuseFormulae();

  void FilterOutUndesirableDedicatedRegisters();

  size_t EstimateSearchSpaceComplexity() const;
  void NarrowSearchSpaceByDetectingSupersets();
  void NarrowSearchSpaceByCollapsingUnrolledCode();
  void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  void NarrowSearchSpaceByPickingWinnerRegs();
  void NarrowSearchSpaceUsingHeuristics();

  void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                    Cost &SolutionCost,
                    SmallVectorImpl<const Formula *> &Workspace,
                    const Cost &CurCost,
                    const SmallPtrSet<const SCEV *, 16> &CurRegs,
                    DenseSet<const SCEV *> &VisitedRegs) const;
  void Solve(SmallVectorImpl<const Formula *> &Solution) const;

  BasicBlock::iterator
    HoistInsertPosition(BasicBlock::iterator IP,
                        const SmallVectorImpl<Instruction *> &Inputs) const;
  BasicBlock::iterator AdjustInsertPositionForExpand(BasicBlock::iterator IP,
                                                     const LSRFixup &LF,
                                                     const LSRUse &LU) const;

  Value *Expand(const LSRFixup &LF,
                const Formula &F,
                BasicBlock::iterator IP,
                SCEVExpander &Rewriter,
                SmallVectorImpl<WeakVH> &DeadInsts) const;
  void RewriteForPHI(PHINode *PN, const LSRFixup &LF,
                     const Formula &F,
                     SCEVExpander &Rewriter,
                     SmallVectorImpl<WeakVH> &DeadInsts,
                     Pass *P) const;
  void Rewrite(const LSRFixup &LF,
               const Formula &F,
               SCEVExpander &Rewriter,
               SmallVectorImpl<WeakVH> &DeadInsts,
               Pass *P) const;
  void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                         Pass *P);

  LSRInstance(const TargetLowering *tli, Loop *l, Pass *P);

  bool getChanged() const { return Changed; }

  void print_factors_and_types(raw_ostream &OS) const;
  void print_fixups(raw_ostream &OS) const;
  void print_uses(raw_ostream &OS) const;
  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// OptimizeShadowIV - If IV is used in an int-to-float cast inside the loop
/// then try to eliminate the cast operation.
void LSRInstance::OptimizeShadowIV() {
  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;

  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
       UI != E; /* empty */) {
    IVUsers::const_iterator CandidateUI = UI;
    ++UI;
    Instruction *ShadowUse = CandidateUI->getUser();
    const Type *DestTy = NULL;

    /* If shadow use is an int->float cast then insert a second IV
       to eliminate this cast.

         for (unsigned i = 0; i < n; ++i)
           foo((double)i);

       is transformed into

         double d = 0.0;
         for (unsigned i = 0; i < n; ++i, ++d)
           foo(d);
    */
    if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser()))
      DestTy = UCast->getDestTy();
    else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser()))
      DestTy = SCast->getDestTy();
    if (!DestTy) continue;

    if (TLI) {
      // If target does not support DestTy natively then do not apply
      // this transformation.
      EVT DVT = TLI->getValueType(DestTy);
      if (!TLI->isTypeLegal(DVT)) continue;
    }

    PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
    if (!PH) continue;
    if (PH->getNumIncomingValues() != 2) continue;

    const Type *SrcTy = PH->getType();
    int Mantissa = DestTy->getFPMantissaWidth();
    if (Mantissa == -1) continue;
    if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
      continue;

    unsigned Entry, Latch;
    if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
      Entry = 0;
      Latch = 1;
    } else {
      Entry = 1;
      Latch = 0;
    }

    ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
    if (!Init) continue;
    Constant *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());

    BinaryOperator *Incr =
      dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
    if (!Incr) continue;
    if (Incr->getOpcode() != Instruction::Add
        && Incr->getOpcode() != Instruction::Sub)
      continue;

    /* Initialize new IV, double d = 0.0 in above example. */
    ConstantInt *C = NULL;
    if (Incr->getOperand(0) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(1));
    else if (Incr->getOperand(1) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(0));
    else
      continue;

    if (!C) continue;

    // Ignore negative constants, as the code below doesn't handle them
    // correctly. TODO: Remove this restriction.
    if (!C->getValue().isStrictlyPositive()) continue;

    /* Add new PHINode. */
    PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);

    /* Create new increment. '++d' in above example. */
    Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
    BinaryOperator *NewIncr =
      BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
                               Instruction::FAdd : Instruction::FSub,
                             NewPH, CFP, "IV.S.next.", Incr);

    NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
    NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));

    /* Remove cast operation */
    ShadowUse->replaceAllUsesWith(NewPH);
    ShadowUse->eraseFromParent();
    Changed = true;
    break;
  }
}

/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
/// set the IV user and stride information and return true, otherwise return
/// false.
bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) {
  for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
    if (UI->getUser() == Cond) {
      // NOTE: we could handle setcc instructions with multiple uses here, but
      // InstCombine does it as well for simple uses; it's not clear that it
      // occurs enough in real life to handle.
      CondUse = UI;
      return true;
    }
  return false;
}

/// OptimizeMax - Rewrite the loop's terminating condition if it uses
/// a max computation.
///
/// This is a narrow solution to a specific, but acute, problem. For loops
/// like this:
///
///   i = 0;
///   do {
///     p[i] = 0.0;
///   } while (++i < n);
///
/// the trip count isn't just 'n', because 'n' might not be positive. And
/// unfortunately this can come up even for loops where the user didn't use
/// a C do-while loop. For example, seemingly well-behaved top-test loops
/// will commonly be lowered like this:
///
///   if (n > 0) {
///     i = 0;
///     do {
///       p[i] = 0.0;
///     } while (++i < n);
///   }
///
/// and then it's possible for subsequent optimization to obscure the if
/// test in such a way that indvars can't find it.
///
/// When indvars can't find the if test in loops like this, it creates a
/// max expression, which allows it to give the loop a canonical
/// induction variable:
///
///   i = 0;
///   max = n < 1 ? 1 : n;
///   do {
///     p[i] = 0.0;
///   } while (++i != max);
///
/// Canonical induction variables are necessary because the loop passes
/// are designed around them. The most obvious example of this is the
/// LoopInfo analysis, which doesn't remember trip count values. It
/// expects to be able to rediscover the trip count each time it is
/// needed, and it does this using a simple analysis that only succeeds if
/// the loop has a canonical induction variable.
///
/// However, when it comes time to generate code, the maximum operation
/// can be quite costly, especially if it's inside of an outer loop.
///
/// This function solves this problem by detecting loops of this type and
/// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
/// the instructions for the maximum computation.
///
ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
  // Check that the loop matches the pattern we're looking for.
  if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
      Cond->getPredicate() != CmpInst::ICMP_NE)
    return Cond;

  SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
  if (!Sel || !Sel->hasOneUse()) return Cond;

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return Cond;
  const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1);

  // Add one to the backedge-taken count to get the trip count.
  const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount);
  if (IterationCount != SE.getSCEV(Sel)) return Cond;

  // Check for a max calculation that matches the pattern. There's no check
  // for ICMP_ULE here because the comparison would be with zero, which
  // isn't interesting.
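  // The max may appear either as the backedge-taken count itself or as the
  // trip count (backedge-taken count plus one); the predicate chosen below
  // differs by one accordingly (SLE vs. SLT/ULT).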
1613 CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; 1614 const SCEVNAryExpr *Max = 0; 1615 if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) { 1616 Pred = ICmpInst::ICMP_SLE; 1617 Max = S; 1618 } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) { 1619 Pred = ICmpInst::ICMP_SLT; 1620 Max = S; 1621 } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) { 1622 Pred = ICmpInst::ICMP_ULT; 1623 Max = U; 1624 } else { 1625 // No match; bail. 1626 return Cond; 1627 } 1628 1629 // To handle a max with more than two operands, this optimization would 1630 // require additional checking and setup. 1631 if (Max->getNumOperands() != 2) 1632 return Cond; 1633 1634 const SCEV *MaxLHS = Max->getOperand(0); 1635 const SCEV *MaxRHS = Max->getOperand(1); 1636 1637 // ScalarEvolution canonicalizes constants to the left. For < and >, look 1638 // for a comparison with 1. For <= and >=, a comparison with zero. 1639 if (!MaxLHS || 1640 (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One))) 1641 return Cond; 1642 1643 // Check the relevant induction variable for conformance to 1644 // the pattern. 1645 const SCEV *IV = SE.getSCEV(Cond->getOperand(0)); 1646 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV); 1647 if (!AR || !AR->isAffine() || 1648 AR->getStart() != One || 1649 AR->getStepRecurrence(SE) != One) 1650 return Cond; 1651 1652 assert(AR->getLoop() == L && 1653 "Loop condition operand is an addrec in a different loop!"); 1654 1655 // Check the right operand of the select, and remember it, as it will 1656 // be used in the new comparison instruction. 1657 Value *NewRHS = 0; 1658 if (ICmpInst::isTrueWhenEqual(Pred)) { 1659 // Look for n+1, and grab n. 1660 if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1))) 1661 if (isa<ConstantInt>(BO->getOperand(1)) && 1662 cast<ConstantInt>(BO->getOperand(1))->isOne() && 1663 SE.getSCEV(BO->getOperand(0)) == MaxRHS) 1664 NewRHS = BO->getOperand(0); 1665 if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2))) 1666 if (isa<ConstantInt>(BO->getOperand(1)) && 1667 cast<ConstantInt>(BO->getOperand(1))->isOne() && 1668 SE.getSCEV(BO->getOperand(0)) == MaxRHS) 1669 NewRHS = BO->getOperand(0); 1670 if (!NewRHS) 1671 return Cond; 1672 } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS) 1673 NewRHS = Sel->getOperand(1); 1674 else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS) 1675 NewRHS = Sel->getOperand(2); 1676 else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS)) 1677 NewRHS = SU->getValue(); 1678 else 1679 // Max doesn't match expected pattern. 1680 return Cond; 1681 1682 // Determine the new comparison opcode. It may be signed or unsigned, 1683 // and the original comparison may be either equality or inequality. 1684 if (Cond->getPredicate() == CmpInst::ICMP_EQ) 1685 Pred = CmpInst::getInversePredicate(Pred); 1686 1687 // Ok, everything looks ok to change the condition into an SLT or SGE and 1688 // delete the max calculation. 1689 ICmpInst *NewCond = 1690 new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp"); 1691 1692 // Delete the max calculation instructions. 
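// The old icmp and the select feeding it are erased below; the compare that
// produced the select's condition is only erased if it has no remaining uses.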
1693 Cond->replaceAllUsesWith(NewCond); 1694 CondUse->setUser(NewCond); 1695 Instruction *Cmp = cast<Instruction>(Sel->getOperand(0)); 1696 Cond->eraseFromParent(); 1697 Sel->eraseFromParent(); 1698 if (Cmp->use_empty()) 1699 Cmp->eraseFromParent(); 1700 return NewCond; 1701 } 1702 1703 /// OptimizeLoopTermCond - Change loop terminating condition to use the 1704 /// postinc iv when possible. 1705 void 1706 LSRInstance::OptimizeLoopTermCond() { 1707 SmallPtrSet<Instruction *, 4> PostIncs; 1708 1709 BasicBlock *LatchBlock = L->getLoopLatch(); 1710 SmallVector<BasicBlock*, 8> ExitingBlocks; 1711 L->getExitingBlocks(ExitingBlocks); 1712 1713 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 1714 BasicBlock *ExitingBlock = ExitingBlocks[i]; 1715 1716 // Get the terminating condition for the loop if possible. If we 1717 // can, we want to change it to use a post-incremented version of its 1718 // induction variable, to allow coalescing the live ranges for the IV into 1719 // one register value. 1720 1721 BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator()); 1722 if (!TermBr) 1723 continue; 1724 // FIXME: Overly conservative, termination condition could be an 'or' etc.. 1725 if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition())) 1726 continue; 1727 1728 // Search IVUsesByStride to find Cond's IVUse if there is one. 1729 IVStrideUse *CondUse = 0; 1730 ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition()); 1731 if (!FindIVUserForCond(Cond, CondUse)) 1732 continue; 1733 1734 // If the trip count is computed in terms of a max (due to ScalarEvolution 1735 // being unable to find a sufficient guard, for example), change the loop 1736 // comparison to use SLT or ULT instead of NE. 1737 // One consequence of doing this now is that it disrupts the count-down 1738 // optimization. That's not always a bad thing though, because in such 1739 // cases it may still be worthwhile to avoid a max. 1740 Cond = OptimizeMax(Cond, CondUse); 1741 1742 // If this exiting block dominates the latch block, it may also use 1743 // the post-inc value if it won't be shared with other uses. 1744 // Check for dominance. 1745 if (!DT.dominates(ExitingBlock, LatchBlock)) 1746 continue; 1747 1748 // Conservatively avoid trying to use the post-inc value in non-latch 1749 // exits if there may be pre-inc users in intervening blocks. 1750 if (LatchBlock != ExitingBlock) 1751 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) 1752 // Test if the use is reachable from the exiting block. This dominator 1753 // query is a conservative approximation of reachability. 1754 if (&*UI != CondUse && 1755 !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) { 1756 // Conservatively assume there may be reuse if the quotient of their 1757 // strides could be a legal scale. 1758 const SCEV *A = IU.getStride(*CondUse, L); 1759 const SCEV *B = IU.getStride(*UI, L); 1760 if (!A || !B) continue; 1761 if (SE.getTypeSizeInBits(A->getType()) != 1762 SE.getTypeSizeInBits(B->getType())) { 1763 if (SE.getTypeSizeInBits(A->getType()) > 1764 SE.getTypeSizeInBits(B->getType())) 1765 B = SE.getSignExtendExpr(B, A->getType()); 1766 else 1767 A = SE.getSignExtendExpr(A, B->getType()); 1768 } 1769 if (const SCEVConstant *D = 1770 dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) { 1771 const ConstantInt *C = D->getValue(); 1772 // Stride of one or negative one can have reuse with non-addresses. 
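// (For example, a plain counter compared directly against a bound.) Decline
// the post-inc transformation conservatively in that case.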
1773 if (C->isOne() || C->isAllOnesValue()) 1774 goto decline_post_inc; 1775 // Avoid weird situations. 1776 if (C->getValue().getMinSignedBits() >= 64 || 1777 C->getValue().isMinSignedValue()) 1778 goto decline_post_inc; 1779 // Without TLI, assume that any stride might be valid, and so any 1780 // use might be shared. 1781 if (!TLI) 1782 goto decline_post_inc; 1783 // Check for possible scaled-address reuse. 1784 const Type *AccessTy = getAccessType(UI->getUser()); 1785 TargetLowering::AddrMode AM; 1786 AM.Scale = C->getSExtValue(); 1787 if (TLI->isLegalAddressingMode(AM, AccessTy)) 1788 goto decline_post_inc; 1789 AM.Scale = -AM.Scale; 1790 if (TLI->isLegalAddressingMode(AM, AccessTy)) 1791 goto decline_post_inc; 1792 } 1793 } 1794 1795 DEBUG(dbgs() << " Change loop exiting icmp to use postinc iv: " 1796 << *Cond << '\n'); 1797 1798 // It's possible for the setcc instruction to be anywhere in the loop, and 1799 // possible for it to have multiple users. If it is not immediately before 1800 // the exiting block branch, move it. 1801 if (&*++BasicBlock::iterator(Cond) != TermBr) { 1802 if (Cond->hasOneUse()) { 1803 Cond->moveBefore(TermBr); 1804 } else { 1805 // Clone the terminating condition and insert into the loopend. 1806 ICmpInst *OldCond = Cond; 1807 Cond = cast<ICmpInst>(Cond->clone()); 1808 Cond->setName(L->getHeader()->getName() + ".termcond"); 1809 ExitingBlock->getInstList().insert(TermBr, Cond); 1810 1811 // Clone the IVUse, as the old use still exists! 1812 CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace()); 1813 TermBr->replaceUsesOfWith(OldCond, Cond); 1814 } 1815 } 1816 1817 // If we get to here, we know that we can transform the setcc instruction to 1818 // use the post-incremented version of the IV, allowing us to coalesce the 1819 // live ranges for the IV correctly. 1820 CondUse->transformToPostInc(L); 1821 Changed = true; 1822 1823 PostIncs.insert(Cond); 1824 decline_post_inc:; 1825 } 1826 1827 // Determine an insertion point for the loop induction variable increment. It 1828 // must dominate all the post-inc comparisons we just set up, and it must 1829 // dominate the loop latch edge. 1830 IVIncInsertPos = L->getLoopLatch()->getTerminator(); 1831 for (SmallPtrSet<Instruction *, 4>::const_iterator I = PostIncs.begin(), 1832 E = PostIncs.end(); I != E; ++I) { 1833 BasicBlock *BB = 1834 DT.findNearestCommonDominator(IVIncInsertPos->getParent(), 1835 (*I)->getParent()); 1836 if (BB == (*I)->getParent()) 1837 IVIncInsertPos = *I; 1838 else if (BB != IVIncInsertPos->getParent()) 1839 IVIncInsertPos = BB->getTerminator(); 1840 } 1841 } 1842 1843 /// reconcileNewOffset - Determine if the given use can accomodate a fixup 1844 /// at the given offset and other details. If so, update the use and 1845 /// return true. 1846 bool 1847 LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg, 1848 LSRUse::KindType Kind, const Type *AccessTy) { 1849 int64_t NewMinOffset = LU.MinOffset; 1850 int64_t NewMaxOffset = LU.MaxOffset; 1851 const Type *NewAccessTy = AccessTy; 1852 1853 // Check for a mismatched kind. It's tempting to collapse mismatched kinds to 1854 // something conservative, however this can pessimize in the case that one of 1855 // the uses will have all its uses outside the loop, for example. 1856 if (LU.Kind != Kind) 1857 return false; 1858 // Conservatively assume HasBaseReg is true for now. 
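// Only widen the use's offset range if an offset of the resulting extent
// would still be foldable by the target (checked via isAlwaysFoldable below).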
1859   if (NewOffset < LU.MinOffset) {
1860     if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, HasBaseReg,
1861                           Kind, AccessTy, TLI))
1862       return false;
1863     NewMinOffset = NewOffset;
1864   } else if (NewOffset > LU.MaxOffset) {
1865     if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, HasBaseReg,
1866                           Kind, AccessTy, TLI))
1867       return false;
1868     NewMaxOffset = NewOffset;
1869   }
1870   // Check for a mismatched access type, and fall back conservatively as needed.
1871   // TODO: Be less conservative when the type is similar and can use the same
1872   // addressing modes.
1873   if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
1874     NewAccessTy = Type::getVoidTy(AccessTy->getContext());
1875
1876   // Update the use.
1877   LU.MinOffset = NewMinOffset;
1878   LU.MaxOffset = NewMaxOffset;
1879   LU.AccessTy = NewAccessTy;
1880   if (NewOffset != LU.Offsets.back())
1881     LU.Offsets.push_back(NewOffset);
1882   return true;
1883 }
1884
1885 /// getUse - Return an LSRUse index and an offset value for a fixup which
1886 /// needs the given expression, with the given kind and optional access type.
1887 /// Either reuse an existing use or create a new one, as needed.
1888 std::pair<size_t, int64_t>
1889 LSRInstance::getUse(const SCEV *&Expr,
1890                     LSRUse::KindType Kind, const Type *AccessTy) {
1891   const SCEV *Copy = Expr;
1892   int64_t Offset = ExtractImmediate(Expr, SE);
1893
1894   // Basic uses can't accept any offset, for example.
1895   if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true, Kind, AccessTy, TLI)) {
1896     Expr = Copy;
1897     Offset = 0;
1898   }
1899
1900   std::pair<UseMapTy::iterator, bool> P =
1901     UseMap.insert(std::make_pair(std::make_pair(Expr, Kind), 0));
1902   if (!P.second) {
1903     // A use already existed with this base.
1904     size_t LUIdx = P.first->second;
1905     LSRUse &LU = Uses[LUIdx];
1906     if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy))
1907       // Reuse this use.
1908       return std::make_pair(LUIdx, Offset);
1909   }
1910
1911   // Create a new use.
1912   size_t LUIdx = Uses.size();
1913   P.first->second = LUIdx;
1914   Uses.push_back(LSRUse(Kind, AccessTy));
1915   LSRUse &LU = Uses[LUIdx];
1916
1917   // We don't need to track redundant offsets, but we don't need to go out
1918   // of our way here to avoid them.
1919   if (LU.Offsets.empty() || Offset != LU.Offsets.back())
1920     LU.Offsets.push_back(Offset);
1921
1922   LU.MinOffset = Offset;
1923   LU.MaxOffset = Offset;
1924   return std::make_pair(LUIdx, Offset);
1925 }
1926
1927 /// DeleteUse - Delete the given use from the Uses list.
1928 void LSRInstance::DeleteUse(LSRUse &LU) {
1929   if (&LU != &Uses.back())
1930     std::swap(LU, Uses.back());
1931   Uses.pop_back();
1932 }
1933
1934 /// FindUseWithSimilarFormula - Look for a use distinct from OrigLU which has
1935 /// a formula that has the same registers as the given formula.
1936 LSRUse *
1937 LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
1938                                        const LSRUse &OrigLU) {
1939   // Search all uses for the formula. This could be more clever.
1940   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
1941     LSRUse &LU = Uses[LUIdx];
1942     // Check whether this use is close enough to OrigLU, to see whether it's
1943     // worthwhile looking through its formulae.
1944     // Ignore ICmpZero uses because they may contain formulae generated by
1945     // GenerateICmpZeroScales, in which case adding fixup offsets may
1946     // be invalid.
1947 if (&LU != &OrigLU && 1948 LU.Kind != LSRUse::ICmpZero && 1949 LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy && 1950 LU.WidestFixupType == OrigLU.WidestFixupType && 1951 LU.HasFormulaWithSameRegs(OrigF)) { 1952 // Scan through this use's formulae. 1953 for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(), 1954 E = LU.Formulae.end(); I != E; ++I) { 1955 const Formula &F = *I; 1956 // Check to see if this formula has the same registers and symbols 1957 // as OrigF. 1958 if (F.BaseRegs == OrigF.BaseRegs && 1959 F.ScaledReg == OrigF.ScaledReg && 1960 F.AM.BaseGV == OrigF.AM.BaseGV && 1961 F.AM.Scale == OrigF.AM.Scale) { 1962 if (F.AM.BaseOffs == 0) 1963 return &LU; 1964 // This is the formula where all the registers and symbols matched; 1965 // there aren't going to be any others. Since we declined it, we 1966 // can skip the rest of the formulae and procede to the next LSRUse. 1967 break; 1968 } 1969 } 1970 } 1971 } 1972 1973 // Nothing looked good. 1974 return 0; 1975 } 1976 1977 void LSRInstance::CollectInterestingTypesAndFactors() { 1978 SmallSetVector<const SCEV *, 4> Strides; 1979 1980 // Collect interesting types and strides. 1981 SmallVector<const SCEV *, 4> Worklist; 1982 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) { 1983 const SCEV *Expr = IU.getExpr(*UI); 1984 1985 // Collect interesting types. 1986 Types.insert(SE.getEffectiveSCEVType(Expr->getType())); 1987 1988 // Add strides for mentioned loops. 1989 Worklist.push_back(Expr); 1990 do { 1991 const SCEV *S = Worklist.pop_back_val(); 1992 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { 1993 Strides.insert(AR->getStepRecurrence(SE)); 1994 Worklist.push_back(AR->getStart()); 1995 } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 1996 Worklist.append(Add->op_begin(), Add->op_end()); 1997 } 1998 } while (!Worklist.empty()); 1999 } 2000 2001 // Compute interesting factors from the set of interesting strides. 2002 for (SmallSetVector<const SCEV *, 4>::const_iterator 2003 I = Strides.begin(), E = Strides.end(); I != E; ++I) 2004 for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter = 2005 llvm::next(I); NewStrideIter != E; ++NewStrideIter) { 2006 const SCEV *OldStride = *I; 2007 const SCEV *NewStride = *NewStrideIter; 2008 2009 if (SE.getTypeSizeInBits(OldStride->getType()) != 2010 SE.getTypeSizeInBits(NewStride->getType())) { 2011 if (SE.getTypeSizeInBits(OldStride->getType()) > 2012 SE.getTypeSizeInBits(NewStride->getType())) 2013 NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType()); 2014 else 2015 OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType()); 2016 } 2017 if (const SCEVConstant *Factor = 2018 dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride, 2019 SE, true))) { 2020 if (Factor->getValue()->getValue().getMinSignedBits() <= 64) 2021 Factors.insert(Factor->getValue()->getValue().getSExtValue()); 2022 } else if (const SCEVConstant *Factor = 2023 dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride, 2024 NewStride, 2025 SE, true))) { 2026 if (Factor->getValue()->getValue().getMinSignedBits() <= 64) 2027 Factors.insert(Factor->getValue()->getValue().getSExtValue()); 2028 } 2029 } 2030 2031 // If all uses use the same type, don't bother looking for truncation-based 2032 // reuse. 
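// (Truncation-based reuse only arises when, for example, an i64 IV also
// feeds i32 users; with a single type there is nothing for GenerateTruncates
// to work with.)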
2033 if (Types.size() == 1) 2034 Types.clear(); 2035 2036 DEBUG(print_factors_and_types(dbgs())); 2037 } 2038 2039 void LSRInstance::CollectFixupsAndInitialFormulae() { 2040 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) { 2041 // Record the uses. 2042 LSRFixup &LF = getNewFixup(); 2043 LF.UserInst = UI->getUser(); 2044 LF.OperandValToReplace = UI->getOperandValToReplace(); 2045 LF.PostIncLoops = UI->getPostIncLoops(); 2046 2047 LSRUse::KindType Kind = LSRUse::Basic; 2048 const Type *AccessTy = 0; 2049 if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) { 2050 Kind = LSRUse::Address; 2051 AccessTy = getAccessType(LF.UserInst); 2052 } 2053 2054 const SCEV *S = IU.getExpr(*UI); 2055 2056 // Equality (== and !=) ICmps are special. We can rewrite (i == N) as 2057 // (N - i == 0), and this allows (N - i) to be the expression that we work 2058 // with rather than just N or i, so we can consider the register 2059 // requirements for both N and i at the same time. Limiting this code to 2060 // equality icmps is not a problem because all interesting loops use 2061 // equality icmps, thanks to IndVarSimplify. 2062 if (ICmpInst *CI = dyn_cast<ICmpInst>(LF.UserInst)) 2063 if (CI->isEquality()) { 2064 // Swap the operands if needed to put the OperandValToReplace on the 2065 // left, for consistency. 2066 Value *NV = CI->getOperand(1); 2067 if (NV == LF.OperandValToReplace) { 2068 CI->setOperand(1, CI->getOperand(0)); 2069 CI->setOperand(0, NV); 2070 NV = CI->getOperand(1); 2071 Changed = true; 2072 } 2073 2074 // x == y --> x - y == 0 2075 const SCEV *N = SE.getSCEV(NV); 2076 if (N->isLoopInvariant(L)) { 2077 Kind = LSRUse::ICmpZero; 2078 S = SE.getMinusSCEV(N, S); 2079 } 2080 2081 // -1 and the negations of all interesting strides (except the negation 2082 // of -1) are now also interesting. 2083 for (size_t i = 0, e = Factors.size(); i != e; ++i) 2084 if (Factors[i] != -1) 2085 Factors.insert(-(uint64_t)Factors[i]); 2086 Factors.insert(-1); 2087 } 2088 2089 // Set up the initial formula for this use. 2090 std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy); 2091 LF.LUIdx = P.first; 2092 LF.Offset = P.second; 2093 LSRUse &LU = Uses[LF.LUIdx]; 2094 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); 2095 if (!LU.WidestFixupType || 2096 SE.getTypeSizeInBits(LU.WidestFixupType) < 2097 SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) 2098 LU.WidestFixupType = LF.OperandValToReplace->getType(); 2099 2100 // If this is the first use of this LSRUse, give it a formula. 2101 if (LU.Formulae.empty()) { 2102 InsertInitialFormula(S, LU, LF.LUIdx); 2103 CountRegisters(LU.Formulae.back(), LF.LUIdx); 2104 } 2105 } 2106 2107 DEBUG(print_fixups(dbgs())); 2108 } 2109 2110 /// InsertInitialFormula - Insert a formula for the given expression into 2111 /// the given use, separating out loop-variant portions from loop-invariant 2112 /// and loop-computable portions. 2113 void 2114 LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) { 2115 Formula F; 2116 F.InitialMatch(S, L, SE, DT); 2117 bool Inserted = InsertFormula(LU, LUIdx, F); 2118 assert(Inserted && "Initial formula already exists!"); (void)Inserted; 2119 } 2120 2121 /// InsertSupplementalFormula - Insert a simple single-register formula for 2122 /// the given expression into the given use. 
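/// This is used by CollectLoopInvariantFixupsAndFormulae, where a
/// loop-invariant value is treated as a single base register.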
2123 void 2124 LSRInstance::InsertSupplementalFormula(const SCEV *S, 2125 LSRUse &LU, size_t LUIdx) { 2126 Formula F; 2127 F.BaseRegs.push_back(S); 2128 F.AM.HasBaseReg = true; 2129 bool Inserted = InsertFormula(LU, LUIdx, F); 2130 assert(Inserted && "Supplemental formula already exists!"); (void)Inserted; 2131 } 2132 2133 /// CountRegisters - Note which registers are used by the given formula, 2134 /// updating RegUses. 2135 void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) { 2136 if (F.ScaledReg) 2137 RegUses.CountRegister(F.ScaledReg, LUIdx); 2138 for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(), 2139 E = F.BaseRegs.end(); I != E; ++I) 2140 RegUses.CountRegister(*I, LUIdx); 2141 } 2142 2143 /// InsertFormula - If the given formula has not yet been inserted, add it to 2144 /// the list, and return true. Return false otherwise. 2145 bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) { 2146 if (!LU.InsertFormula(F)) 2147 return false; 2148 2149 CountRegisters(F, LUIdx); 2150 return true; 2151 } 2152 2153 /// CollectLoopInvariantFixupsAndFormulae - Check for other uses of 2154 /// loop-invariant values which we're tracking. These other uses will pin these 2155 /// values in registers, making them less profitable for elimination. 2156 /// TODO: This currently misses non-constant addrec step registers. 2157 /// TODO: Should this give more weight to users inside the loop? 2158 void 2159 LSRInstance::CollectLoopInvariantFixupsAndFormulae() { 2160 SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end()); 2161 SmallPtrSet<const SCEV *, 8> Inserted; 2162 2163 while (!Worklist.empty()) { 2164 const SCEV *S = Worklist.pop_back_val(); 2165 2166 if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) 2167 Worklist.append(N->op_begin(), N->op_end()); 2168 else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) 2169 Worklist.push_back(C->getOperand()); 2170 else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) { 2171 Worklist.push_back(D->getLHS()); 2172 Worklist.push_back(D->getRHS()); 2173 } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 2174 if (!Inserted.insert(U)) continue; 2175 const Value *V = U->getValue(); 2176 if (const Instruction *Inst = dyn_cast<Instruction>(V)) { 2177 // Look for instructions defined outside the loop. 2178 if (L->contains(Inst)) continue; 2179 } else if (isa<UndefValue>(V)) 2180 // Undef doesn't have a live range, so it doesn't matter. 2181 continue; 2182 for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end(); 2183 UI != UE; ++UI) { 2184 const Instruction *UserInst = dyn_cast<Instruction>(*UI); 2185 // Ignore non-instructions. 2186 if (!UserInst) 2187 continue; 2188 // Ignore instructions in other functions (as can happen with 2189 // Constants). 2190 if (UserInst->getParent()->getParent() != L->getHeader()->getParent()) 2191 continue; 2192 // Ignore instructions not dominated by the loop. 2193 const BasicBlock *UseBB = !isa<PHINode>(UserInst) ? 2194 UserInst->getParent() : 2195 cast<PHINode>(UserInst)->getIncomingBlock( 2196 PHINode::getIncomingValueNumForOperand(UI.getOperandNo())); 2197 if (!DT.dominates(L->getHeader(), UseBB)) 2198 continue; 2199 // Ignore uses which are part of other SCEV expressions, to avoid 2200 // analyzing them multiple times. 2201 if (SE.isSCEVable(UserInst->getType())) { 2202 const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst)); 2203 // If the user is a no-op, look through to its uses. 
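// (A "no-op" user here is one whose own SCEV is still the same opaque
// unknown as the operand's; it is looked through below via the UserS == U
// case.)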
2204 if (!isa<SCEVUnknown>(UserS)) 2205 continue; 2206 if (UserS == U) { 2207 Worklist.push_back( 2208 SE.getUnknown(const_cast<Instruction *>(UserInst))); 2209 continue; 2210 } 2211 } 2212 // Ignore icmp instructions which are already being analyzed. 2213 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) { 2214 unsigned OtherIdx = !UI.getOperandNo(); 2215 Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx)); 2216 if (SE.getSCEV(OtherOp)->hasComputableLoopEvolution(L)) 2217 continue; 2218 } 2219 2220 LSRFixup &LF = getNewFixup(); 2221 LF.UserInst = const_cast<Instruction *>(UserInst); 2222 LF.OperandValToReplace = UI.getUse(); 2223 std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, 0); 2224 LF.LUIdx = P.first; 2225 LF.Offset = P.second; 2226 LSRUse &LU = Uses[LF.LUIdx]; 2227 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); 2228 if (!LU.WidestFixupType || 2229 SE.getTypeSizeInBits(LU.WidestFixupType) < 2230 SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) 2231 LU.WidestFixupType = LF.OperandValToReplace->getType(); 2232 InsertSupplementalFormula(U, LU, LF.LUIdx); 2233 CountRegisters(LU.Formulae.back(), Uses.size() - 1); 2234 break; 2235 } 2236 } 2237 } 2238 } 2239 2240 /// CollectSubexprs - Split S into subexpressions which can be pulled out into 2241 /// separate registers. If C is non-null, multiply each subexpression by C. 2242 static void CollectSubexprs(const SCEV *S, const SCEVConstant *C, 2243 SmallVectorImpl<const SCEV *> &Ops, 2244 const Loop *L, 2245 ScalarEvolution &SE) { 2246 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 2247 // Break out add operands. 2248 for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end(); 2249 I != E; ++I) 2250 CollectSubexprs(*I, C, Ops, L, SE); 2251 return; 2252 } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { 2253 // Split a non-zero base out of an addrec. 2254 if (!AR->getStart()->isZero()) { 2255 CollectSubexprs(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0), 2256 AR->getStepRecurrence(SE), 2257 AR->getLoop()), 2258 C, Ops, L, SE); 2259 CollectSubexprs(AR->getStart(), C, Ops, L, SE); 2260 return; 2261 } 2262 } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 2263 // Break (C * (a + b + c)) into C*a + C*b + C*c. 2264 if (Mul->getNumOperands() == 2) 2265 if (const SCEVConstant *Op0 = 2266 dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 2267 CollectSubexprs(Mul->getOperand(1), 2268 C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0, 2269 Ops, L, SE); 2270 return; 2271 } 2272 } 2273 2274 // Otherwise use the value itself, optionally with a scale applied. 2275 Ops.push_back(C ? SE.getMulExpr(C, S) : S); 2276 } 2277 2278 /// GenerateReassociations - Split out subexpressions from adds and the bases of 2279 /// addrecs. 2280 void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx, 2281 Formula Base, 2282 unsigned Depth) { 2283 // Arbitrarily cap recursion to protect compile time. 2284 if (Depth >= 3) return; 2285 2286 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) { 2287 const SCEV *BaseReg = Base.BaseRegs[i]; 2288 2289 SmallVector<const SCEV *, 8> AddOps; 2290 CollectSubexprs(BaseReg, 0, AddOps, L, SE); 2291 2292 if (AddOps.size() == 1) continue; 2293 2294 for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(), 2295 JE = AddOps.end(); J != JE; ++J) { 2296 2297 // Loop-variant "unknown" values are uninteresting; we won't be able to 2298 // do anything meaningful with them. 
2299       if (isa<SCEVUnknown>(*J) && !(*J)->isLoopInvariant(L))
2300         continue;
2301
2302       // Don't pull a constant into a register if the constant could be folded
2303       // into an immediate field.
2304       if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset,
2305                            Base.getNumRegs() > 1,
2306                            LU.Kind, LU.AccessTy, TLI, SE))
2307         continue;
2308
2309       // Collect all operands except *J.
2310       SmallVector<const SCEV *, 8> InnerAddOps
2311         (((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
2312       InnerAddOps.append
2313         (llvm::next(J), ((const SmallVector<const SCEV *, 8> &)AddOps).end());
2314
2315       // Don't leave just a constant behind in a register if the constant could
2316       // be folded into an immediate field.
2317       if (InnerAddOps.size() == 1 &&
2318           isAlwaysFoldable(InnerAddOps[0], LU.MinOffset, LU.MaxOffset,
2319                            Base.getNumRegs() > 1,
2320                            LU.Kind, LU.AccessTy, TLI, SE))
2321         continue;
2322
2323       const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
2324       if (InnerSum->isZero())
2325         continue;
2326       Formula F = Base;
2327       F.BaseRegs[i] = InnerSum;
2328       F.BaseRegs.push_back(*J);
2329       if (InsertFormula(LU, LUIdx, F))
2330         // If that formula hadn't been seen before, recurse to find more like
2331         // it.
2332         GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth+1);
2333     }
2334   }
2335 }
2336
2337 /// GenerateCombinations - Generate a formula consisting of all of the
2338 /// loop-dominating registers added into a single register.
2339 void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
2340                                        Formula Base) {
2341   // This method is only interesting on a plurality of registers.
2342   if (Base.BaseRegs.size() <= 1) return;
2343
2344   Formula F = Base;
2345   F.BaseRegs.clear();
2346   SmallVector<const SCEV *, 4> Ops;
2347   for (SmallVectorImpl<const SCEV *>::const_iterator
2348        I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) {
2349     const SCEV *BaseReg = *I;
2350     if (BaseReg->properlyDominates(L->getHeader(), &DT) &&
2351         !BaseReg->hasComputableLoopEvolution(L))
2352       Ops.push_back(BaseReg);
2353     else
2354       F.BaseRegs.push_back(BaseReg);
2355   }
2356   if (Ops.size() > 1) {
2357     const SCEV *Sum = SE.getAddExpr(Ops);
2358     // TODO: If Sum is zero, it probably means ScalarEvolution missed an
2359     // opportunity to fold something. For now, just ignore such cases
2360     // rather than proceed with zero in a register.
2361     if (!Sum->isZero()) {
2362       F.BaseRegs.push_back(Sum);
2363       (void)InsertFormula(LU, LUIdx, F);
2364     }
2365   }
2366 }
2367
2368 /// GenerateSymbolicOffsets - Generate reuse formulae using symbolic offsets.
2369 void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
2370                                           Formula Base) {
2371   // We can't add a symbolic offset if the address already contains one.
2372   if (Base.AM.BaseGV) return;
2373
2374   for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
2375     const SCEV *G = Base.BaseRegs[i];
2376     GlobalValue *GV = ExtractSymbol(G, SE);
2377     if (G->isZero() || !GV)
2378       continue;
2379     Formula F = Base;
2380     F.AM.BaseGV = GV;
2381     if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
2382                     LU.Kind, LU.AccessTy, TLI))
2383       continue;
2384     F.BaseRegs[i] = G;
2385     (void)InsertFormula(LU, LUIdx, F);
2386   }
2387 }
2388
2389 /// GenerateConstantOffsets - Generate reuse formulae using constant offsets.
2390 void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
2391                                           Formula Base) {
2392   // TODO: For now, just add the min and max offset, because it usually isn't
2393   // worthwhile looking at everything in between.
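// (For example, with MinOffset == -64 and MaxOffset == 64, only those two
// endpoint offsets are tried below; the values are purely illustrative.)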
2394 SmallVector<int64_t, 2> Worklist; 2395 Worklist.push_back(LU.MinOffset); 2396 if (LU.MaxOffset != LU.MinOffset) 2397 Worklist.push_back(LU.MaxOffset); 2398 2399 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) { 2400 const SCEV *G = Base.BaseRegs[i]; 2401 2402 for (SmallVectorImpl<int64_t>::const_iterator I = Worklist.begin(), 2403 E = Worklist.end(); I != E; ++I) { 2404 Formula F = Base; 2405 F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I; 2406 if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I, 2407 LU.Kind, LU.AccessTy, TLI)) { 2408 // Add the offset to the base register. 2409 const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G); 2410 // If it cancelled out, drop the base register, otherwise update it. 2411 if (NewG->isZero()) { 2412 std::swap(F.BaseRegs[i], F.BaseRegs.back()); 2413 F.BaseRegs.pop_back(); 2414 } else 2415 F.BaseRegs[i] = NewG; 2416 2417 (void)InsertFormula(LU, LUIdx, F); 2418 } 2419 } 2420 2421 int64_t Imm = ExtractImmediate(G, SE); 2422 if (G->isZero() || Imm == 0) 2423 continue; 2424 Formula F = Base; 2425 F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Imm; 2426 if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset, 2427 LU.Kind, LU.AccessTy, TLI)) 2428 continue; 2429 F.BaseRegs[i] = G; 2430 (void)InsertFormula(LU, LUIdx, F); 2431 } 2432 } 2433 2434 /// GenerateICmpZeroScales - For ICmpZero, check to see if we can scale up 2435 /// the comparison. For example, x == y -> x*c == y*c. 2436 void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, 2437 Formula Base) { 2438 if (LU.Kind != LSRUse::ICmpZero) return; 2439 2440 // Determine the integer type for the base formula. 2441 const Type *IntTy = Base.getType(); 2442 if (!IntTy) return; 2443 if (SE.getTypeSizeInBits(IntTy) > 64) return; 2444 2445 // Don't do this if there is more than one offset. 2446 if (LU.MinOffset != LU.MaxOffset) return; 2447 2448 assert(!Base.AM.BaseGV && "ICmpZero use is not legal!"); 2449 2450 // Check each interesting stride. 2451 for (SmallSetVector<int64_t, 8>::const_iterator 2452 I = Factors.begin(), E = Factors.end(); I != E; ++I) { 2453 int64_t Factor = *I; 2454 2455 // Check that the multiplication doesn't overflow. 2456 if (Base.AM.BaseOffs == INT64_MIN && Factor == -1) 2457 continue; 2458 int64_t NewBaseOffs = (uint64_t)Base.AM.BaseOffs * Factor; 2459 if (NewBaseOffs / Factor != Base.AM.BaseOffs) 2460 continue; 2461 2462 // Check that multiplying with the use offset doesn't overflow. 2463 int64_t Offset = LU.MinOffset; 2464 if (Offset == INT64_MIN && Factor == -1) 2465 continue; 2466 Offset = (uint64_t)Offset * Factor; 2467 if (Offset / Factor != LU.MinOffset) 2468 continue; 2469 2470 Formula F = Base; 2471 F.AM.BaseOffs = NewBaseOffs; 2472 2473 // Check that this scale is legal. 2474 if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI)) 2475 continue; 2476 2477 // Compensate for the use having MinOffset built into it. 2478 F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Offset - LU.MinOffset; 2479 2480 const SCEV *FactorS = SE.getConstant(IntTy, Factor); 2481 2482 // Check that multiplying with each base register doesn't overflow. 2483 for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) { 2484 F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS); 2485 if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i]) 2486 goto next; 2487 } 2488 2489 // Check that multiplying with the scaled register doesn't overflow. 
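// (The check works by dividing the product back out with getExactSDiv; if
// the original register isn't recovered, the scaling overflowed or wasn't
// exact.)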
2490 if (F.ScaledReg) { 2491 F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS); 2492 if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg) 2493 continue; 2494 } 2495 2496 // If we make it here and it's legal, add it. 2497 (void)InsertFormula(LU, LUIdx, F); 2498 next:; 2499 } 2500 } 2501 2502 /// GenerateScales - Generate stride factor reuse formulae by making use of 2503 /// scaled-offset address modes, for example. 2504 void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) { 2505 // Determine the integer type for the base formula. 2506 const Type *IntTy = Base.getType(); 2507 if (!IntTy) return; 2508 2509 // If this Formula already has a scaled register, we can't add another one. 2510 if (Base.AM.Scale != 0) return; 2511 2512 // Check each interesting stride. 2513 for (SmallSetVector<int64_t, 8>::const_iterator 2514 I = Factors.begin(), E = Factors.end(); I != E; ++I) { 2515 int64_t Factor = *I; 2516 2517 Base.AM.Scale = Factor; 2518 Base.AM.HasBaseReg = Base.BaseRegs.size() > 1; 2519 // Check whether this scale is going to be legal. 2520 if (!isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset, 2521 LU.Kind, LU.AccessTy, TLI)) { 2522 // As a special-case, handle special out-of-loop Basic users specially. 2523 // TODO: Reconsider this special case. 2524 if (LU.Kind == LSRUse::Basic && 2525 isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset, 2526 LSRUse::Special, LU.AccessTy, TLI) && 2527 LU.AllFixupsOutsideLoop) 2528 LU.Kind = LSRUse::Special; 2529 else 2530 continue; 2531 } 2532 // For an ICmpZero, negating a solitary base register won't lead to 2533 // new solutions. 2534 if (LU.Kind == LSRUse::ICmpZero && 2535 !Base.AM.HasBaseReg && Base.AM.BaseOffs == 0 && !Base.AM.BaseGV) 2536 continue; 2537 // For each addrec base reg, apply the scale, if possible. 2538 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) 2539 if (const SCEVAddRecExpr *AR = 2540 dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i])) { 2541 const SCEV *FactorS = SE.getConstant(IntTy, Factor); 2542 if (FactorS->isZero()) 2543 continue; 2544 // Divide out the factor, ignoring high bits, since we'll be 2545 // scaling the value back up in the end. 2546 if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) { 2547 // TODO: This could be optimized to avoid all the copying. 2548 Formula F = Base; 2549 F.ScaledReg = Quotient; 2550 F.DeleteBaseReg(F.BaseRegs[i]); 2551 (void)InsertFormula(LU, LUIdx, F); 2552 } 2553 } 2554 } 2555 } 2556 2557 /// GenerateTruncates - Generate reuse formulae from different IV types. 2558 void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) { 2559 // This requires TargetLowering to tell us which truncates are free. 2560 if (!TLI) return; 2561 2562 // Don't bother truncating symbolic values. 2563 if (Base.AM.BaseGV) return; 2564 2565 // Determine the integer type for the base formula. 
2566 const Type *DstTy = Base.getType(); 2567 if (!DstTy) return; 2568 DstTy = SE.getEffectiveSCEVType(DstTy); 2569 2570 for (SmallSetVector<const Type *, 4>::const_iterator 2571 I = Types.begin(), E = Types.end(); I != E; ++I) { 2572 const Type *SrcTy = *I; 2573 if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) { 2574 Formula F = Base; 2575 2576 if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I); 2577 for (SmallVectorImpl<const SCEV *>::iterator J = F.BaseRegs.begin(), 2578 JE = F.BaseRegs.end(); J != JE; ++J) 2579 *J = SE.getAnyExtendExpr(*J, SrcTy); 2580 2581 // TODO: This assumes we've done basic processing on all uses and 2582 // have an idea what the register usage is. 2583 if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses)) 2584 continue; 2585 2586 (void)InsertFormula(LU, LUIdx, F); 2587 } 2588 } 2589 } 2590 2591 namespace { 2592 2593 /// WorkItem - Helper class for GenerateCrossUseConstantOffsets. It's used to 2594 /// defer modifications so that the search phase doesn't have to worry about 2595 /// the data structures moving underneath it. 2596 struct WorkItem { 2597 size_t LUIdx; 2598 int64_t Imm; 2599 const SCEV *OrigReg; 2600 2601 WorkItem(size_t LI, int64_t I, const SCEV *R) 2602 : LUIdx(LI), Imm(I), OrigReg(R) {} 2603 2604 void print(raw_ostream &OS) const; 2605 void dump() const; 2606 }; 2607 2608 } 2609 2610 void WorkItem::print(raw_ostream &OS) const { 2611 OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx 2612 << " , add offset " << Imm; 2613 } 2614 2615 void WorkItem::dump() const { 2616 print(errs()); errs() << '\n'; 2617 } 2618 2619 /// GenerateCrossUseConstantOffsets - Look for registers which are a constant 2620 /// distance apart and try to form reuse opportunities between them. 2621 void LSRInstance::GenerateCrossUseConstantOffsets() { 2622 // Group the registers by their value without any added constant offset. 2623 typedef std::map<int64_t, const SCEV *> ImmMapTy; 2624 typedef DenseMap<const SCEV *, ImmMapTy> RegMapTy; 2625 RegMapTy Map; 2626 DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap; 2627 SmallVector<const SCEV *, 8> Sequence; 2628 for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end(); 2629 I != E; ++I) { 2630 const SCEV *Reg = *I; 2631 int64_t Imm = ExtractImmediate(Reg, SE); 2632 std::pair<RegMapTy::iterator, bool> Pair = 2633 Map.insert(std::make_pair(Reg, ImmMapTy())); 2634 if (Pair.second) 2635 Sequence.push_back(Reg); 2636 Pair.first->second.insert(std::make_pair(Imm, *I)); 2637 UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(*I); 2638 } 2639 2640 // Now examine each set of registers with the same base value. Build up 2641 // a list of work to do and do the work in a separate step so that we're 2642 // not adding formulae and register counts while we're searching. 2643 SmallVector<WorkItem, 32> WorkItems; 2644 SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems; 2645 for (SmallVectorImpl<const SCEV *>::const_iterator I = Sequence.begin(), 2646 E = Sequence.end(); I != E; ++I) { 2647 const SCEV *Reg = *I; 2648 const ImmMapTy &Imms = Map.find(Reg)->second; 2649 2650 // It's not worthwhile looking for reuse if there's only one offset. 2651 if (Imms.size() == 1) 2652 continue; 2653 2654 DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':'; 2655 for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end(); 2656 J != JE; ++J) 2657 dbgs() << ' ' << J->first; 2658 dbgs() << '\n'); 2659 2660 // Examine each offset. 
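// A WorkItem is recorded for each interesting (use, delta) pair so that,
// for example, registers reachable as A, A+4, and A+8 can later be
// re-expressed off a single base register with folded immediates.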
2661 for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end(); 2662 J != JE; ++J) { 2663 const SCEV *OrigReg = J->second; 2664 2665 int64_t JImm = J->first; 2666 const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg); 2667 2668 if (!isa<SCEVConstant>(OrigReg) && 2669 UsedByIndicesMap[Reg].count() == 1) { 2670 DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg << '\n'); 2671 continue; 2672 } 2673 2674 // Conservatively examine offsets between this orig reg a few selected 2675 // other orig regs. 2676 ImmMapTy::const_iterator OtherImms[] = { 2677 Imms.begin(), prior(Imms.end()), 2678 Imms.upper_bound((Imms.begin()->first + prior(Imms.end())->first) / 2) 2679 }; 2680 for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) { 2681 ImmMapTy::const_iterator M = OtherImms[i]; 2682 if (M == J || M == JE) continue; 2683 2684 // Compute the difference between the two. 2685 int64_t Imm = (uint64_t)JImm - M->first; 2686 for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1; 2687 LUIdx = UsedByIndices.find_next(LUIdx)) 2688 // Make a memo of this use, offset, and register tuple. 2689 if (UniqueItems.insert(std::make_pair(LUIdx, Imm))) 2690 WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg)); 2691 } 2692 } 2693 } 2694 2695 Map.clear(); 2696 Sequence.clear(); 2697 UsedByIndicesMap.clear(); 2698 UniqueItems.clear(); 2699 2700 // Now iterate through the worklist and add new formulae. 2701 for (SmallVectorImpl<WorkItem>::const_iterator I = WorkItems.begin(), 2702 E = WorkItems.end(); I != E; ++I) { 2703 const WorkItem &WI = *I; 2704 size_t LUIdx = WI.LUIdx; 2705 LSRUse &LU = Uses[LUIdx]; 2706 int64_t Imm = WI.Imm; 2707 const SCEV *OrigReg = WI.OrigReg; 2708 2709 const Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType()); 2710 const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm)); 2711 unsigned BitWidth = SE.getTypeSizeInBits(IntTy); 2712 2713 // TODO: Use a more targeted data structure. 2714 for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) { 2715 const Formula &F = LU.Formulae[L]; 2716 // Use the immediate in the scaled register. 2717 if (F.ScaledReg == OrigReg) { 2718 int64_t Offs = (uint64_t)F.AM.BaseOffs + 2719 Imm * (uint64_t)F.AM.Scale; 2720 // Don't create 50 + reg(-50). 2721 if (F.referencesReg(SE.getSCEV( 2722 ConstantInt::get(IntTy, -(uint64_t)Offs)))) 2723 continue; 2724 Formula NewF = F; 2725 NewF.AM.BaseOffs = Offs; 2726 if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset, 2727 LU.Kind, LU.AccessTy, TLI)) 2728 continue; 2729 NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg); 2730 2731 // If the new scale is a constant in a register, and adding the constant 2732 // value to the immediate would produce a value closer to zero than the 2733 // immediate itself, then the formula isn't worthwhile. 2734 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg)) 2735 if (C->getValue()->getValue().isNegative() != 2736 (NewF.AM.BaseOffs < 0) && 2737 (C->getValue()->getValue().abs() * APInt(BitWidth, F.AM.Scale)) 2738 .ule(abs64(NewF.AM.BaseOffs))) 2739 continue; 2740 2741 // OK, looks good. 2742 (void)InsertFormula(LU, LUIdx, NewF); 2743 } else { 2744 // Use the immediate in a base register. 
2745         for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) {
2746           const SCEV *BaseReg = F.BaseRegs[N];
2747           if (BaseReg != OrigReg)
2748             continue;
2749           Formula NewF = F;
2750           NewF.AM.BaseOffs = (uint64_t)NewF.AM.BaseOffs + Imm;
2751           if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
2752                           LU.Kind, LU.AccessTy, TLI))
2753             continue;
2754           NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg);
2755
2756           // If the new formula has a constant in a register, and adding the
2757           // constant value to the immediate would produce a value closer to
2758           // zero than the immediate itself, then the formula isn't worthwhile.
2759           for (SmallVectorImpl<const SCEV *>::const_iterator
2760                J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end();
2761                J != JE; ++J)
2762             if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J))
2763               if ((C->getValue()->getValue() + NewF.AM.BaseOffs).abs().slt(
2764                    abs64(NewF.AM.BaseOffs)) &&
2765                   (C->getValue()->getValue() +
2766                    NewF.AM.BaseOffs).countTrailingZeros() >=
2767                   CountTrailingZeros_64(NewF.AM.BaseOffs))
2768                 goto skip_formula;
2769
2770           // Ok, looks good.
2771           (void)InsertFormula(LU, LUIdx, NewF);
2772           break;
2773         skip_formula:;
2774         }
2775       }
2776     }
2777   }
2778 }
2779
2780 /// GenerateAllReuseFormulae - Generate formulae for each use.
2781 void
2782 LSRInstance::GenerateAllReuseFormulae() {
2783   // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
2784   // queries are more precise.
2785   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
2786     LSRUse &LU = Uses[LUIdx];
2787     for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
2788       GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
2789     for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
2790       GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
2791   }
2792   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
2793     LSRUse &LU = Uses[LUIdx];
2794     for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
2795       GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
2796     for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
2797       GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
2798     for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
2799       GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
2800     for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
2801       GenerateScales(LU, LUIdx, LU.Formulae[i]);
2802   }
2803   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
2804     LSRUse &LU = Uses[LUIdx];
2805     for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
2806       GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
2807   }
2808
2809   GenerateCrossUseConstantOffsets();
2810
2811   DEBUG(dbgs() << "\n"
2812                   "After generating reuse formulae:\n";
2813         print_uses(dbgs()));
2814 }
2815
2816 /// If there are multiple formulae with the same set of registers used
2817 /// by other uses, pick the best one and delete the others.
2818 void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
2819 #ifndef NDEBUG
2820   bool ChangedFormulae = false;
2821 #endif
2822
2823   // Collect the best formula for each unique set of shared registers. This
2824   // is reset for each use.
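// The key for each formula is the sorted list of its registers that are
// also used by other uses; formulae differing only in use-private registers
// therefore collapse onto a single entry.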
2825 typedef DenseMap<SmallVector<const SCEV *, 2>, size_t, UniquifierDenseMapInfo> 2826 BestFormulaeTy; 2827 BestFormulaeTy BestFormulae; 2828 2829 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 2830 LSRUse &LU = Uses[LUIdx]; 2831 FormulaSorter Sorter(L, LU, SE, DT); 2832 DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n'); 2833 2834 bool Any = false; 2835 for (size_t FIdx = 0, NumForms = LU.Formulae.size(); 2836 FIdx != NumForms; ++FIdx) { 2837 Formula &F = LU.Formulae[FIdx]; 2838 2839 SmallVector<const SCEV *, 2> Key; 2840 for (SmallVectorImpl<const SCEV *>::const_iterator J = F.BaseRegs.begin(), 2841 JE = F.BaseRegs.end(); J != JE; ++J) { 2842 const SCEV *Reg = *J; 2843 if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx)) 2844 Key.push_back(Reg); 2845 } 2846 if (F.ScaledReg && 2847 RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx)) 2848 Key.push_back(F.ScaledReg); 2849 // Unstable sort by host order ok, because this is only used for 2850 // uniquifying. 2851 std::sort(Key.begin(), Key.end()); 2852 2853 std::pair<BestFormulaeTy::const_iterator, bool> P = 2854 BestFormulae.insert(std::make_pair(Key, FIdx)); 2855 if (!P.second) { 2856 Formula &Best = LU.Formulae[P.first->second]; 2857 if (Sorter.operator()(F, Best)) 2858 std::swap(F, Best); 2859 DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs()); 2860 dbgs() << "\n" 2861 " in favor of formula "; Best.print(dbgs()); 2862 dbgs() << '\n'); 2863 #ifndef NDEBUG 2864 ChangedFormulae = true; 2865 #endif 2866 LU.DeleteFormula(F); 2867 --FIdx; 2868 --NumForms; 2869 Any = true; 2870 continue; 2871 } 2872 } 2873 2874 // Now that we've filtered out some formulae, recompute the Regs set. 2875 if (Any) 2876 LU.RecomputeRegs(LUIdx, RegUses); 2877 2878 // Reset this to prepare for the next use. 2879 BestFormulae.clear(); 2880 } 2881 2882 DEBUG(if (ChangedFormulae) { 2883 dbgs() << "\n" 2884 "After filtering out undesirable candidates:\n"; 2885 print_uses(dbgs()); 2886 }); 2887 } 2888 2889 // This is a rough guess that seems to work fairly well. 2890 static const size_t ComplexityLimit = UINT16_MAX; 2891 2892 /// EstimateSearchSpaceComplexity - Estimate the worst-case number of 2893 /// solutions the solver might have to consider. It almost never considers 2894 /// this many solutions because it prune the search space, but the pruning 2895 /// isn't always sufficient. 2896 size_t LSRInstance::EstimateSearchSpaceComplexity() const { 2897 uint32_t Power = 1; 2898 for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(), 2899 E = Uses.end(); I != E; ++I) { 2900 size_t FSize = I->Formulae.size(); 2901 if (FSize >= ComplexityLimit) { 2902 Power = ComplexityLimit; 2903 break; 2904 } 2905 Power *= FSize; 2906 if (Power >= ComplexityLimit) 2907 break; 2908 } 2909 return Power; 2910 } 2911 2912 /// NarrowSearchSpaceByDetectingSupersets - When one formula uses a superset 2913 /// of the registers of another formula, it won't help reduce register 2914 /// pressure (though it may not necessarily hurt register pressure); remove 2915 /// it to simplify the system. 
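///
/// For example, if a use has one formula with base registers {reg(A), reg(7)}
/// and another with just {reg(A)} and the 7 folded into its immediate field,
/// the first formula is redundant and is deleted.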
2916 void LSRInstance::NarrowSearchSpaceByDetectingSupersets() { 2917 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { 2918 DEBUG(dbgs() << "The search space is too complex.\n"); 2919 2920 DEBUG(dbgs() << "Narrowing the search space by eliminating formulae " 2921 "which use a superset of registers used by other " 2922 "formulae.\n"); 2923 2924 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 2925 LSRUse &LU = Uses[LUIdx]; 2926 bool Any = false; 2927 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { 2928 Formula &F = LU.Formulae[i]; 2929 // Look for a formula with a constant or GV in a register. If the use 2930 // also has a formula with that same value in an immediate field, 2931 // delete the one that uses a register. 2932 for (SmallVectorImpl<const SCEV *>::const_iterator 2933 I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) { 2934 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) { 2935 Formula NewF = F; 2936 NewF.AM.BaseOffs += C->getValue()->getSExtValue(); 2937 NewF.BaseRegs.erase(NewF.BaseRegs.begin() + 2938 (I - F.BaseRegs.begin())); 2939 if (LU.HasFormulaWithSameRegs(NewF)) { 2940 DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); 2941 LU.DeleteFormula(F); 2942 --i; 2943 --e; 2944 Any = true; 2945 break; 2946 } 2947 } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) { 2948 if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) 2949 if (!F.AM.BaseGV) { 2950 Formula NewF = F; 2951 NewF.AM.BaseGV = GV; 2952 NewF.BaseRegs.erase(NewF.BaseRegs.begin() + 2953 (I - F.BaseRegs.begin())); 2954 if (LU.HasFormulaWithSameRegs(NewF)) { 2955 DEBUG(dbgs() << " Deleting "; F.print(dbgs()); 2956 dbgs() << '\n'); 2957 LU.DeleteFormula(F); 2958 --i; 2959 --e; 2960 Any = true; 2961 break; 2962 } 2963 } 2964 } 2965 } 2966 } 2967 if (Any) 2968 LU.RecomputeRegs(LUIdx, RegUses); 2969 } 2970 2971 DEBUG(dbgs() << "After pre-selection:\n"; 2972 print_uses(dbgs())); 2973 } 2974 } 2975 2976 /// NarrowSearchSpaceByCollapsingUnrolledCode - When there are many registers 2977 /// for expressions like A, A+1, A+2, etc., allocate a single register for 2978 /// them. 2979 void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() { 2980 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { 2981 DEBUG(dbgs() << "The search space is too complex.\n"); 2982 2983 DEBUG(dbgs() << "Narrowing the search space by assuming that uses " 2984 "separated by a constant offset will use the same " 2985 "registers.\n"); 2986 2987 // This is especially useful for unrolled loops. 2988 2989 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 2990 LSRUse &LU = Uses[LUIdx]; 2991 for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(), 2992 E = LU.Formulae.end(); I != E; ++I) { 2993 const Formula &F = *I; 2994 if (F.AM.BaseOffs != 0 && F.AM.Scale == 0) { 2995 if (LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU)) { 2996 if (reconcileNewOffset(*LUThatHas, F.AM.BaseOffs, 2997 /*HasBaseReg=*/false, 2998 LU.Kind, LU.AccessTy)) { 2999 DEBUG(dbgs() << " Deleting use "; LU.print(dbgs()); 3000 dbgs() << '\n'); 3001 3002 LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop; 3003 3004 // Delete formulae from the new use which are no longer legal. 
3005 bool Any = false; 3006 for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) { 3007 Formula &F = LUThatHas->Formulae[i]; 3008 if (!isLegalUse(F.AM, 3009 LUThatHas->MinOffset, LUThatHas->MaxOffset, 3010 LUThatHas->Kind, LUThatHas->AccessTy, TLI)) { 3011 DEBUG(dbgs() << " Deleting "; F.print(dbgs()); 3012 dbgs() << '\n'); 3013 LUThatHas->DeleteFormula(F); 3014 --i; 3015 --e; 3016 Any = true; 3017 } 3018 } 3019 if (Any) 3020 LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses); 3021 3022 // Update the relocs to reference the new use. 3023 for (SmallVectorImpl<LSRFixup>::iterator I = Fixups.begin(), 3024 E = Fixups.end(); I != E; ++I) { 3025 LSRFixup &Fixup = *I; 3026 if (Fixup.LUIdx == LUIdx) { 3027 Fixup.LUIdx = LUThatHas - &Uses.front(); 3028 Fixup.Offset += F.AM.BaseOffs; 3029 DEBUG(dbgs() << "New fixup has offset " 3030 << Fixup.Offset << '\n'); 3031 } 3032 if (Fixup.LUIdx == NumUses-1) 3033 Fixup.LUIdx = LUIdx; 3034 } 3035 3036 // Delete the old use. 3037 DeleteUse(LU); 3038 --LUIdx; 3039 --NumUses; 3040 break; 3041 } 3042 } 3043 } 3044 } 3045 } 3046 3047 DEBUG(dbgs() << "After pre-selection:\n"; 3048 print_uses(dbgs())); 3049 } 3050 } 3051 3052 /// NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters - Call 3053 /// FilterOutUndesirableDedicatedRegisters again, if necessary, now that 3054 /// we've done more filtering, as it may be able to find more formulae to 3055 /// eliminate. 3056 void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){ 3057 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { 3058 DEBUG(dbgs() << "The search space is too complex.\n"); 3059 3060 DEBUG(dbgs() << "Narrowing the search space by re-filtering out " 3061 "undesirable dedicated registers.\n"); 3062 3063 FilterOutUndesirableDedicatedRegisters(); 3064 3065 DEBUG(dbgs() << "After pre-selection:\n"; 3066 print_uses(dbgs())); 3067 } 3068 } 3069 3070 /// NarrowSearchSpaceByPickingWinnerRegs - Pick a register which seems likely 3071 /// to be profitable, and then in any use which has any reference to that 3072 /// register, delete all formulae which do not reference that register. 3073 void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() { 3074 // With all other options exhausted, loop until the system is simple 3075 // enough to handle. 3076 SmallPtrSet<const SCEV *, 4> Taken; 3077 while (EstimateSearchSpaceComplexity() >= ComplexityLimit) { 3078 // Ok, we have too many of formulae on our hands to conveniently handle. 3079 // Use a rough heuristic to thin out the list. 3080 DEBUG(dbgs() << "The search space is too complex.\n"); 3081 3082 // Pick the register which is used by the most LSRUses, which is likely 3083 // to be a good reuse register candidate. 3084 const SCEV *Best = 0; 3085 unsigned BestNum = 0; 3086 for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end(); 3087 I != E; ++I) { 3088 const SCEV *Reg = *I; 3089 if (Taken.count(Reg)) 3090 continue; 3091 if (!Best) 3092 Best = Reg; 3093 else { 3094 unsigned Count = RegUses.getUsedByIndices(Reg).count(); 3095 if (Count > BestNum) { 3096 Best = Reg; 3097 BestNum = Count; 3098 } 3099 } 3100 } 3101 3102 DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best 3103 << " will yield profitable reuse.\n"); 3104 Taken.insert(Best); 3105 3106 // In any use with formulae which references this register, delete formulae 3107 // which don't reference it. 
3108 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 3109 LSRUse &LU = Uses[LUIdx]; 3110 if (!LU.Regs.count(Best)) continue; 3111 3112 bool Any = false; 3113 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { 3114 Formula &F = LU.Formulae[i]; 3115 if (!F.referencesReg(Best)) { 3116 DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); 3117 LU.DeleteFormula(F); 3118 --e; 3119 --i; 3120 Any = true; 3121 assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?"); 3122 continue; 3123 } 3124 } 3125 3126 if (Any) 3127 LU.RecomputeRegs(LUIdx, RegUses); 3128 } 3129 3130 DEBUG(dbgs() << "After pre-selection:\n"; 3131 print_uses(dbgs())); 3132 } 3133 } 3134 3135 /// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of 3136 /// formulae to choose from, use some rough heuristics to prune down the number 3137 /// of formulae. This keeps the main solver from taking an extraordinary amount 3138 /// of time in some worst-case scenarios. 3139 void LSRInstance::NarrowSearchSpaceUsingHeuristics() { 3140 NarrowSearchSpaceByDetectingSupersets(); 3141 NarrowSearchSpaceByCollapsingUnrolledCode(); 3142 NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(); 3143 NarrowSearchSpaceByPickingWinnerRegs(); 3144 } 3145 3146 /// SolveRecurse - This is the recursive solver. 3147 void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution, 3148 Cost &SolutionCost, 3149 SmallVectorImpl<const Formula *> &Workspace, 3150 const Cost &CurCost, 3151 const SmallPtrSet<const SCEV *, 16> &CurRegs, 3152 DenseSet<const SCEV *> &VisitedRegs) const { 3153 // Some ideas: 3154 // - prune more: 3155 // - use more aggressive filtering 3156 // - sort the formula so that the most profitable solutions are found first 3157 // - sort the uses too 3158 // - search faster: 3159 // - don't compute a cost, and then compare. compare while computing a cost 3160 // and bail early. 3161 // - track register sets with SmallBitVector 3162 3163 const LSRUse &LU = Uses[Workspace.size()]; 3164 3165 // If this use references any register that's already a part of the 3166 // in-progress solution, consider it a requirement that a formula must 3167 // reference that register in order to be considered. This prunes out 3168 // unprofitable searching. 3169 SmallSetVector<const SCEV *, 4> ReqRegs; 3170 for (SmallPtrSet<const SCEV *, 16>::const_iterator I = CurRegs.begin(), 3171 E = CurRegs.end(); I != E; ++I) 3172 if (LU.Regs.count(*I)) 3173 ReqRegs.insert(*I); 3174 3175 bool AnySatisfiedReqRegs = false; 3176 SmallPtrSet<const SCEV *, 16> NewRegs; 3177 Cost NewCost; 3178 retry: 3179 for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(), 3180 E = LU.Formulae.end(); I != E; ++I) { 3181 const Formula &F = *I; 3182 3183 // Ignore formulae which do not use any of the required registers. 3184 for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(), 3185 JE = ReqRegs.end(); J != JE; ++J) { 3186 const SCEV *Reg = *J; 3187 if ((!F.ScaledReg || F.ScaledReg != Reg) && 3188 std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) == 3189 F.BaseRegs.end()) 3190 goto skip; 3191 } 3192 AnySatisfiedReqRegs = true; 3193 3194 // Evaluate the cost of the current formula. If it's already worse than 3195 // the current best, prune the search at that point. 
    NewCost = CurCost;
    NewRegs = CurRegs;
    NewCost.RateFormula(F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT);
    if (NewCost < SolutionCost) {
      Workspace.push_back(&F);
      if (Workspace.size() != Uses.size()) {
        SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
                     NewRegs, VisitedRegs);
        if (F.getNumRegs() == 1 && Workspace.size() == 1)
          VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
      } else {
        DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
              dbgs() << ". Regs:";
              for (SmallPtrSet<const SCEV *, 16>::const_iterator
                   I = NewRegs.begin(), E = NewRegs.end(); I != E; ++I)
                dbgs() << ' ' << **I;
              dbgs() << '\n');

        SolutionCost = NewCost;
        Solution = Workspace;
      }
      Workspace.pop_back();
    }
  skip:;
  }

  // If none of the formulae had all of the required registers, relax the
  // constraint so that we don't exclude all formulae.
  if (!AnySatisfiedReqRegs) {
    assert(!ReqRegs.empty() && "Solver failed even without required registers");
    ReqRegs.clear();
    goto retry;
  }
}

/// Solve - Choose one formula from each use. Return the results in the given
/// Solution vector.
void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
  SmallVector<const Formula *, 8> Workspace;
  Cost SolutionCost;
  SolutionCost.Loose();
  Cost CurCost;
  SmallPtrSet<const SCEV *, 16> CurRegs;
  DenseSet<const SCEV *> VisitedRegs;
  Workspace.reserve(Uses.size());

  // SolveRecurse does all the work.
  SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
               CurRegs, VisitedRegs);

  // Ok, we've now made all our decisions.
  DEBUG(dbgs() << "\n"
                  "The chosen solution requires "; SolutionCost.print(dbgs());
        dbgs() << ":\n";
        for (size_t i = 0, e = Uses.size(); i != e; ++i) {
          dbgs() << " ";
          Uses[i].print(dbgs());
          dbgs() << "\n"
                    " ";
          Solution[i]->print(dbgs());
          dbgs() << '\n';
        });

  assert(Solution.size() == Uses.size() && "Malformed solution!");
}

/// HoistInsertPosition - Helper for AdjustInsertPositionForExpand. Climb up
/// the dominator tree as far as we can go while still being dominated by the
/// input positions. This helps canonicalize the insert position, which
/// encourages sharing.
BasicBlock::iterator
LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
                                 const SmallVectorImpl<Instruction *> &Inputs)
                                 const {
  for (;;) {
    const Loop *IPLoop = LI.getLoopFor(IP->getParent());
    unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0;

    BasicBlock *IDom;
    for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) {
      if (!Rung) return IP;
      Rung = Rung->getIDom();
      if (!Rung) return IP;
      IDom = Rung->getBlock();

      // Don't climb into a loop though.
      const Loop *IDomLoop = LI.getLoopFor(IDom);
      unsigned IDomDepth = IDomLoop ?
        IDomLoop->getLoopDepth() : 0;
      if (IDomDepth <= IPLoopDepth &&
          (IDomDepth != IPLoopDepth || IDomLoop == IPLoop))
        break;
    }

    bool AllDominate = true;
    Instruction *BetterPos = 0;
    Instruction *Tentative = IDom->getTerminator();
    for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(),
         E = Inputs.end(); I != E; ++I) {
      Instruction *Inst = *I;
      if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
        AllDominate = false;
        break;
      }
      // Attempt to find an insert position in the middle of the block,
      // instead of at the end, so that it can be used for other expansions.
      if (IDom == Inst->getParent() &&
          (!BetterPos || DT.dominates(BetterPos, Inst)))
        BetterPos = llvm::next(BasicBlock::iterator(Inst));
    }
    if (!AllDominate)
      break;
    if (BetterPos)
      IP = BetterPos;
    else
      IP = Tentative;
  }

  return IP;
}

/// AdjustInsertPositionForExpand - Determine an input position which will be
/// dominated by the operands and which will dominate the result.
BasicBlock::iterator
LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator IP,
                                           const LSRFixup &LF,
                                           const LSRUse &LU) const {
  // Collect some instructions which must be dominated by the
  // expanding replacement. These must be dominated by any operands that
  // will be required in the expansion.
  SmallVector<Instruction *, 4> Inputs;
  if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
    Inputs.push_back(I);
  if (LU.Kind == LSRUse::ICmpZero)
    if (Instruction *I =
          dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
      Inputs.push_back(I);
  if (LF.PostIncLoops.count(L)) {
    if (LF.isUseFullyOutsideLoop(L))
      Inputs.push_back(L->getLoopLatch()->getTerminator());
    else
      Inputs.push_back(IVIncInsertPos);
  }
  // The expansion must also be dominated by the increment positions of any
  // loops for which it is using post-inc mode.
  for (PostIncLoopSet::const_iterator I = LF.PostIncLoops.begin(),
       E = LF.PostIncLoops.end(); I != E; ++I) {
    const Loop *PIL = *I;
    if (PIL == L) continue;

    // Be dominated by the loop exit.
    SmallVector<BasicBlock *, 4> ExitingBlocks;
    PIL->getExitingBlocks(ExitingBlocks);
    if (!ExitingBlocks.empty()) {
      BasicBlock *BB = ExitingBlocks[0];
      for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i)
        BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]);
      Inputs.push_back(BB->getTerminator());
    }
  }

  // Then, climb up the immediate dominator tree as far as we can go while
  // still being dominated by the input positions.
  IP = HoistInsertPosition(IP, Inputs);

  // Don't insert instructions before PHI nodes.
  while (isa<PHINode>(IP)) ++IP;

  // Ignore debug intrinsics.
  while (isa<DbgInfoIntrinsic>(IP)) ++IP;

  return IP;
}

/// Expand - Emit instructions for the leading candidate expression for this
/// LSRUse (this is called "expanding").
Value *LSRInstance::Expand(const LSRFixup &LF,
                           const Formula &F,
                           BasicBlock::iterator IP,
                           SCEVExpander &Rewriter,
                           SmallVectorImpl<WeakVH> &DeadInsts) const {
  const LSRUse &LU = Uses[LF.LUIdx];

  // Determine an input position which will be dominated by the operands and
  // which will dominate the result.
  IP = AdjustInsertPositionForExpand(IP, LF, LU);

  // Inform the Rewriter if we have a post-increment use, so that it can
  // perform an advantageous expansion.
  Rewriter.setPostInc(LF.PostIncLoops);

  // This is the type that the user actually needs.
  const Type *OpTy = LF.OperandValToReplace->getType();
  // This will be the type that we'll initially expand to.
  const Type *Ty = F.getType();
  if (!Ty)
    // No type known; just expand directly to the ultimate type.
    Ty = OpTy;
  else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy))
    // Expand directly to the ultimate type if it's the right size.
    Ty = OpTy;
  // This is the type to do integer arithmetic in.
  const Type *IntTy = SE.getEffectiveSCEVType(Ty);

  // Build up a list of operands to add together to form the full base.
  SmallVector<const SCEV *, 8> Ops;

  // Expand the BaseRegs portion.
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *Reg = *I;
    assert(!Reg->isZero() && "Zero allocated in a base register!");

    // If we're expanding for a post-inc user, make the post-inc adjustment.
    PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
    Reg = TransformForPostIncUse(Denormalize, Reg,
                                 LF.UserInst, LF.OperandValToReplace,
                                 Loops, SE, DT);

    Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP)));
  }

  // Flush the operand list to suppress SCEVExpander hoisting.
  if (!Ops.empty()) {
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }

  // Expand the ScaledReg portion.
  Value *ICmpScaledV = 0;
  if (F.AM.Scale != 0) {
    const SCEV *ScaledS = F.ScaledReg;

    // If we're expanding for a post-inc user, make the post-inc adjustment.
    PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
    ScaledS = TransformForPostIncUse(Denormalize, ScaledS,
                                     LF.UserInst, LF.OperandValToReplace,
                                     Loops, SE, DT);

    if (LU.Kind == LSRUse::ICmpZero) {
      // An interesting way of "folding" with an icmp is to use a negated
      // scale, which we'll implement by inserting it into the other operand
      // of the icmp.
      assert(F.AM.Scale == -1 &&
             "The only scale supported by ICmpZero uses is -1!");
      ICmpScaledV = Rewriter.expandCodeFor(ScaledS, 0, IP);
    } else {
      // Otherwise just expand the scaled register and an explicit scale,
      // which is expected to be matched as part of the address.
      ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP));
      ScaledS = SE.getMulExpr(ScaledS,
                              SE.getConstant(ScaledS->getType(), F.AM.Scale));
      Ops.push_back(ScaledS);

      // Flush the operand list to suppress SCEVExpander hoisting.
      Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
      Ops.clear();
      Ops.push_back(SE.getUnknown(FullV));
    }
  }

  // Expand the GV portion.
  if (F.AM.BaseGV) {
    Ops.push_back(SE.getUnknown(F.AM.BaseGV));

    // Flush the operand list to suppress SCEVExpander hoisting.
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }

  // Expand the immediate portion.
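  // For example (illustrative values only): a formula reg(%a) + 2*reg(%b) + 4
  // with a per-fixup offset of 8 contributes a constant of 12 here; for
  // ICmpZero uses the constant is folded into the compare's other operand
  // below rather than into an address.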
  int64_t Offset = (uint64_t)F.AM.BaseOffs + LF.Offset;
  if (Offset != 0) {
    if (LU.Kind == LSRUse::ICmpZero) {
      // The other interesting way of "folding" with an ICmpZero is to use a
      // negated immediate.
      if (!ICmpScaledV)
        ICmpScaledV = ConstantInt::get(IntTy, -Offset);
      else {
        Ops.push_back(SE.getUnknown(ICmpScaledV));
        ICmpScaledV = ConstantInt::get(IntTy, Offset);
      }
    } else {
      // Just add the immediate values. These again are expected to be matched
      // as part of the address.
      Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset)));
    }
  }

  // Emit instructions summing all the operands.
  const SCEV *FullS = Ops.empty() ?
                      SE.getConstant(IntTy, 0) :
                      SE.getAddExpr(Ops);
  Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP);

  // We're done expanding now, so reset the rewriter.
  Rewriter.clearPostInc();

  // An ICmpZero Formula represents an ICmp which we're handling as a
  // comparison against zero. Now that we've expanded an expression for that
  // form, update the ICmp's other operand.
  if (LU.Kind == LSRUse::ICmpZero) {
    ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
    DeadInsts.push_back(CI->getOperand(1));
    assert(!F.AM.BaseGV && "ICmp does not support folding a global value and "
                           "a scale at the same time!");
    if (F.AM.Scale == -1) {
      if (ICmpScaledV->getType() != OpTy) {
        Instruction *Cast =
          CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false,
                                                   OpTy, false),
                           ICmpScaledV, OpTy, "tmp", CI);
        ICmpScaledV = Cast;
      }
      CI->setOperand(1, ICmpScaledV);
    } else {
      assert(F.AM.Scale == 0 &&
             "ICmp does not support folding a global value and "
             "a scale at the same time!");
      Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
                                           -(uint64_t)Offset);
      if (C->getType() != OpTy)
        C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                          OpTy, false),
                                  C, OpTy);

      CI->setOperand(1, C);
    }
  }

  return FullV;
}

/// RewriteForPHI - Helper for Rewrite. PHI nodes are special because the use
/// of their operands effectively happens in their predecessor blocks, so the
/// expression may need to be expanded in multiple places.
void LSRInstance::RewriteForPHI(PHINode *PN,
                                const LSRFixup &LF,
                                const Formula &F,
                                SCEVExpander &Rewriter,
                                SmallVectorImpl<WeakVH> &DeadInsts,
                                Pass *P) const {
  DenseMap<BasicBlock *, Value *> Inserted;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
      BasicBlock *BB = PN->getIncomingBlock(i);

      // If this is a critical edge, split the edge so that we do not insert
      // the code on all predecessor/successor paths. We do this unless this
      // is the canonical backedge for this loop, which complicates post-inc
      // users.
      if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
          !isa<IndirectBrInst>(BB->getTerminator()) &&
          (PN->getParent() != L->getHeader() || !L->contains(BB))) {
        // Split the critical edge.
        BasicBlock *NewBB = SplitCriticalEdge(BB, PN->getParent(), P);

        // If PN is outside of the loop and BB is in the loop, we want to
        // move the block to be immediately before the PHI block, not
        // immediately after BB.
        if (L->contains(BB) && !L->contains(PN))
          NewBB->moveBefore(PN->getParent());

        // Splitting the edge can reduce the number of PHI entries we have.
        e = PN->getNumIncomingValues();
        BB = NewBB;
        i = PN->getBasicBlockIndex(BB);
      }

      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
        Inserted.insert(std::make_pair(BB, static_cast<Value *>(0)));
      if (!Pair.second)
        PN->setIncomingValue(i, Pair.first->second);
      else {
        Value *FullV = Expand(LF, F, BB->getTerminator(), Rewriter, DeadInsts);

        // If this is reuse-by-noop-cast, insert the noop cast.
        const Type *OpTy = LF.OperandValToReplace->getType();
        if (FullV->getType() != OpTy)
          FullV =
            CastInst::Create(CastInst::getCastOpcode(FullV, false,
                                                     OpTy, false),
                             FullV, LF.OperandValToReplace->getType(),
                             "tmp", BB->getTerminator());

        PN->setIncomingValue(i, FullV);
        Pair.first->second = FullV;
      }
    }
}

/// Rewrite - Emit instructions for the leading candidate expression for this
/// LSRUse (this is called "expanding"), and update the UserInst to reference
/// the newly expanded value.
void LSRInstance::Rewrite(const LSRFixup &LF,
                          const Formula &F,
                          SCEVExpander &Rewriter,
                          SmallVectorImpl<WeakVH> &DeadInsts,
                          Pass *P) const {
  // First, find an insertion point that dominates UserInst. For PHI nodes,
  // find the nearest block which dominates all the relevant uses.
  if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
    RewriteForPHI(PN, LF, F, Rewriter, DeadInsts, P);
  } else {
    Value *FullV = Expand(LF, F, LF.UserInst, Rewriter, DeadInsts);

    // If this is reuse-by-noop-cast, insert the noop cast.
    const Type *OpTy = LF.OperandValToReplace->getType();
    if (FullV->getType() != OpTy) {
      Instruction *Cast =
        CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
                         FullV, OpTy, "tmp", LF.UserInst);
      FullV = Cast;
    }

    // Update the user. ICmpZero is handled specially here (for now) because
    // Expand may have updated one of the operands of the icmp already, and
    // its new value may happen to be equal to LF.OperandValToReplace, in
    // which case doing replaceUsesOfWith leads to replacing both operands
    // with the same value. TODO: Reorganize this.
    if (Uses[LF.LUIdx].Kind == LSRUse::ICmpZero)
      LF.UserInst->setOperand(0, FullV);
    else
      LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
  }

  DeadInsts.push_back(LF.OperandValToReplace);
}

/// ImplementSolution - Rewrite all the fixup locations with new values,
/// following the chosen solution.
void
LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                               Pass *P) {
  // Keep track of instructions we may have made dead, so that
  // we can remove them after we are done working.
  SmallVector<WeakVH, 16> DeadInsts;

  SCEVExpander Rewriter(SE);
  Rewriter.disableCanonicalMode();
  Rewriter.setIVIncInsertPos(L, IVIncInsertPos);

  // Expand the new value definitions and update the users.
  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
       E = Fixups.end(); I != E; ++I) {
    const LSRFixup &Fixup = *I;

    Rewrite(Fixup, *Solution[Fixup.LUIdx], Rewriter, DeadInsts, P);

    Changed = true;
  }

  // Clean up after ourselves. This must be done before deleting any
  // instructions.
  Rewriter.clear();

  Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
}

LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
  : IU(P->getAnalysis<IVUsers>()),
    SE(P->getAnalysis<ScalarEvolution>()),
    DT(P->getAnalysis<DominatorTree>()),
    LI(P->getAnalysis<LoopInfo>()),
    TLI(tli), L(l), Changed(false), IVIncInsertPos(0) {

  // If LoopSimplify form is not available, stay out of trouble.
  if (!L->isLoopSimplifyForm()) return;

  // If there's no interesting work to be done, bail early.
  if (IU.empty()) return;

  DEBUG(dbgs() << "\nLSR on loop ";
        WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false);
        dbgs() << ":\n");

  // First, perform some low-level loop optimizations.
  OptimizeShadowIV();
  OptimizeLoopTermCond();

  // Start collecting data and preparing for the solver.
  CollectInterestingTypesAndFactors();
  CollectFixupsAndInitialFormulae();
  CollectLoopInvariantFixupsAndFormulae();

  DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
        print_uses(dbgs()));

  // Now use the reuse data to generate a bunch of interesting ways
  // to formulate the values needed for the uses.
  GenerateAllReuseFormulae();

  FilterOutUndesirableDedicatedRegisters();
  NarrowSearchSpaceUsingHeuristics();

  SmallVector<const Formula *, 8> Solution;
  Solve(Solution);

  // Release memory that is no longer needed.
  Factors.clear();
  Types.clear();
  RegUses.clear();

#ifndef NDEBUG
  // Formulae should be legal.
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J)
      assert(isLegalUse(J->AM, LU.MinOffset, LU.MaxOffset,
                        LU.Kind, LU.AccessTy, TLI) &&
             "Illegal formula generated!");
  }
#endif

  // Now that we've decided what we want, make it so.
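  // ImplementSolution expands one value per fixup, rewrites each user to use
  // it, and deletes any instructions that become trivially dead as a result.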
  ImplementSolution(Solution, P);
}

void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
  if (Factors.empty() && Types.empty()) return;

  OS << "LSR has identified the following interesting factors and types: ";
  bool First = true;

  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '*' << *I;
  }

  for (SmallSetVector<const Type *, 4>::const_iterator
       I = Types.begin(), E = Types.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '(' << **I << ')';
  }
  OS << '\n';
}

void LSRInstance::print_fixups(raw_ostream &OS) const {
  OS << "LSR is examining the following fixup sites:\n";
  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
       E = Fixups.end(); I != E; ++I) {
    OS << " ";
    I->print(OS);
    OS << '\n';
  }
}

void LSRInstance::print_uses(raw_ostream &OS) const {
  OS << "LSR is examining the following uses:\n";
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    OS << " ";
    LU.print(OS);
    OS << '\n';
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J) {
      OS << " ";
      J->print(OS);
      OS << '\n';
    }
  }
}

void LSRInstance::print(raw_ostream &OS) const {
  print_factors_and_types(OS);
  print_fixups(OS);
  print_uses(OS);
}

void LSRInstance::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

class LoopStrengthReduce : public LoopPass {
  /// TLI - Keep a pointer to a TargetLowering to consult for determining
  /// transformation profitability.
  const TargetLowering *const TLI;

public:
  static char ID; // Pass ID, replacement for typeid
  explicit LoopStrengthReduce(const TargetLowering *tli = 0);

private:
  bool runOnLoop(Loop *L, LPPassManager &LPM);
  void getAnalysisUsage(AnalysisUsage &AU) const;
};

}

char LoopStrengthReduce::ID = 0;
INITIALIZE_PASS(LoopStrengthReduce, "loop-reduce",
                "Loop Strength Reduction", false, false);

Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}

LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli)
  : LoopPass(ID), TLI(tli) {}

void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
  // We split critical edges, so we change the CFG. However, we do update
  // many analyses if they are around.
  AU.addPreservedID(LoopSimplifyID);
  AU.addPreserved("domfrontier");

  AU.addRequired<LoopInfo>();
  AU.addPreserved<LoopInfo>();
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<DominatorTree>();
  AU.addPreserved<DominatorTree>();
  AU.addRequired<ScalarEvolution>();
  AU.addPreserved<ScalarEvolution>();
  AU.addRequired<IVUsers>();
  AU.addPreserved<IVUsers>();
}

bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
  bool Changed = false;

  // Run the main LSR transformation.
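  // Each LSRInstance analyzes and rewrites a single loop; its constructor
  // (above) runs the whole collect/formulate/solve/rewrite sequence.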
  Changed |= LSRInstance(TLI, L, this).getChanged();

  // At this point, it is worth checking to see if any recurrence PHIs are also
  // dead, so that we can remove them as well.
  Changed |= DeleteDeadPHIs(L->getHeader());

  return Changed;
}
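// Illustrative usage (a sketch; assumes a typical LLVMTargetMachine-style
// pass setup, which is not part of this file): schedule the pass with the
// target's lowering info so that addressing-mode legality queries are
// target-accurate:
//
//   PM.add(createLoopStrengthReducePass(getTargetLowering()));
//
// Constructing the pass with no TargetLowering (the default argument) is also
// valid; legality checks then fall back to conservative target-independent
// assumptions.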