//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops
// that have as one or more of their components the loop induction variable.
// It rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
// it's useful to think about these as the same register, with some uses using
// the value of the register before the add and some using it after. In this
// example, the icmp is a post-increment user, since it uses %i.next, which is
// the value of the induction variable after the increment. The other common
// case of post-increment users is users outside the loop.
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should TargetLowering::AddrMode::BaseGV be changed to a ConstantExpr
//       instead of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
//       smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
//       multiple base registers, or as the increment expression in an addrec),
//       we may not actually need both reg and (-1 * reg) in registers; the
//       negation can be implemented by using a sub instead of an add. The
//       lack of support for taking this into consideration when making
//       register pressure decisions is partly worked around by the "Special"
//       use kind.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

namespace {

/// RegSortData - This class holds data which is used to order reuse
/// candidates.
class RegSortData {
public:
  /// UsedByIndices - This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  RegSortData() {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// RegUseTracker - Map register candidates to information about how they are
/// used.
class RegUseTracker {
  typedef DenseMap<const SCEV *, RegSortData> RegUsesTy;

  RegUsesTy RegUses;
  SmallVector<const SCEV *, 16> RegSequence;

public:
  void CountRegister(const SCEV *Reg, size_t LUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;

  void clear();

  typedef SmallVectorImpl<const SCEV *>::iterator iterator;
  typedef SmallVectorImpl<const SCEV *>::const_iterator const_iterator;
  iterator begin() { return RegSequence.begin(); }
  iterator end() { return RegSequence.end(); }
  const_iterator begin() const { return RegSequence.begin(); }
  const_iterator end() const { return RegSequence.end(); }
};

}

void
RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair =
    RegUses.insert(std::make_pair(Reg, RegSortData()));
  RegSortData &RSD = Pair.first->second;
  if (Pair.second)
    RegSequence.push_back(Reg);
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);
}

bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  if (!RegUses.count(Reg)) return false;
  const SmallBitVector &UsedByIndices =
    RegUses.find(Reg)->second.UsedByIndices;
  int i = UsedByIndices.find_first();
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;
  return UsedByIndices.find_next(i) != -1;
}

const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUses.find(Reg);
  assert(I != RegUses.end() && "Unknown register!");
  return I->second.UsedByIndices;
}

void RegUseTracker::clear() {
  RegUses.clear();
  RegSequence.clear();
}

namespace {

/// Formula - This class holds information that describes a formula for
/// computing a value satisfying a use. It may include broken-out immediates
/// and scaled registers.
struct Formula {
  /// AM - This is used to represent complex addressing, as well as other kinds
  /// of interesting uses.
  TargetLowering::AddrMode AM;

  /// BaseRegs - The list of "base" registers for this use. When this is
  /// non-empty, AM.HasBaseReg should be set to true.
  SmallVector<const SCEV *, 2> BaseRegs;

  /// ScaledReg - The 'scaled' register for this use. This should be non-null
  /// when AM.Scale is not zero.
  const SCEV *ScaledReg;

  Formula() : ScaledReg(0) {}

  void InitialMatch(const SCEV *S, Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);

  unsigned getNumRegs() const;
  const Type *getType() const;

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// DoInitialMatch - Recursion helper for InitialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE, DominatorTree &DT) {
  // Collect expressions which properly dominate the loop header.
  if (S->properlyDominates(L->getHeader(), &DT)) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      DoInitialMatch(*I, L, Good, Bad, SE, DT);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE, DT);
      DoInitialMatch(SE.getAddRecExpr(SE.getIntegerSCEV(0, AR->getType()),
                                      AR->getStepRecurrence(SE),
                                      AR->getLoop()),
                     L, Good, Bad, SE, DT);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getOperand(0)->isAllOnesValue()) {
      SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end());
      const SCEV *NewMul = SE.getMulExpr(Ops);

      SmallVector<const SCEV *, 4> MyGood;
      SmallVector<const SCEV *, 4> MyBad;
      DoInitialMatch(NewMul, L, MyGood, MyBad, SE, DT);
      const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
        SE.getEffectiveSCEVType(NewMul->getType())));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyGood.begin(),
           E = MyGood.end(); I != E; ++I)
        Good.push_back(SE.getMulExpr(NegOne, *I));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyBad.begin(),
           E = MyBad.end(); I != E; ++I)
        Bad.push_back(SE.getMulExpr(NegOne, *I));
      return;
    }

  // Ok, we can't do anything interesting. Just stuff the whole thing into a
  // register and hope for the best.
  Bad.push_back(S);
}

/// InitialMatch - Incorporate loop-variant parts of S into this Formula,
/// attempting to keep all loop-invariant and loop-computable values in a
/// single base register.
void Formula::InitialMatch(const SCEV *S, Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT) {
  SmallVector<const SCEV *, 4> Good;
  SmallVector<const SCEV *, 4> Bad;
  DoInitialMatch(S, L, Good, Bad, SE, DT);
  if (!Good.empty()) {
    BaseRegs.push_back(SE.getAddExpr(Good));
    AM.HasBaseReg = true;
  }
  if (!Bad.empty()) {
    BaseRegs.push_back(SE.getAddExpr(Bad));
    AM.HasBaseReg = true;
  }
}

/// getNumRegs - Return the total number of register operands used by this
/// formula. This does not include register uses implied by non-constant
/// addrec strides.
unsigned Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}

/// getType - Return the type of this formula, if it has one, or null
/// otherwise. This type is meaningless except for the bit size.
const Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         AM.BaseGV ? AM.BaseGV->getType() :
         0;
}

/// referencesReg - Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg ||
         std::find(BaseRegs.begin(), BaseRegs.end(), S) != BaseRegs.end();
}

/// hasRegsUsedByUsesOtherThan - Test whether this formula uses registers
/// which are used by uses other than the use with the given index.
bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
  if (ScaledReg)
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
      return true;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I)
    if (RegUses.isRegUsedByUsesOtherThan(*I, LUIdx))
      return true;
  return false;
}

void Formula::print(raw_ostream &OS) const {
  bool First = true;
  if (AM.BaseGV) {
    if (!First) OS << " + "; else First = false;
    WriteAsOperand(OS, AM.BaseGV, /*PrintType=*/false);
  }
  if (AM.BaseOffs != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.BaseOffs;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I) {
    if (!First) OS << " + "; else First = false;
    OS << "reg(" << **I << ')';
  }
  if (AM.Scale != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.Scale << "*reg(";
    if (ScaledReg)
      OS << *ScaledReg;
    else
      OS << "<unknown>";
    OS << ')';
  }
}

void Formula::dump() const {
  print(errs()); errs() << '\n';
}

/// getSDiv - Return an expression for LHS /s RHS, if it can be determined,
/// or null otherwise. If IgnoreSignificantBits is true, expressions like
/// (X * Y) /s Y are simplified to X, ignoring that the multiplication may
/// overflow, which is useful when the result will be used in a context where
/// the most significant bits are ignored.
static const SCEV *getSDiv(const SCEV *LHS, const SCEV *RHS,
                           ScalarEvolution &SE,
                           bool IgnoreSignificantBits = false) {
  // Handle the trivial case, which works for any SCEV type.
  if (LHS == RHS)
    return SE.getIntegerSCEV(1, LHS->getType());

  // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do some
  // folding.
  if (RHS->isAllOnesValue())
    return SE.getMulExpr(LHS, RHS);

  // Check for a division of a constant by a constant.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
    const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
    if (!RC)
      return 0;
    if (C->getValue()->getValue().srem(RC->getValue()->getValue()) != 0)
      return 0;
    return SE.getConstant(C->getValue()->getValue()
               .sdiv(RC->getValue()->getValue()));
  }

  // Distribute the sdiv over addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
    const SCEV *Start = getSDiv(AR->getStart(), RHS, SE,
                                IgnoreSignificantBits);
    if (!Start) return 0;
    const SCEV *Step = getSDiv(AR->getStepRecurrence(SE), RHS, SE,
                               IgnoreSignificantBits);
    if (!Step) return 0;
    return SE.getAddRecExpr(Start, Step, AR->getLoop());
  }

  // Distribute the sdiv over add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
    SmallVector<const SCEV *, 8> Ops;
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I) {
      const SCEV *Op = getSDiv(*I, RHS, SE,
                               IgnoreSignificantBits);
      if (!Op) return 0;
      Ops.push_back(Op);
    }
    return SE.getAddExpr(Ops);
  }

  // Check for a multiply operand that we can pull RHS out of.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS))
    if (IgnoreSignificantBits || Mul->hasNoSignedWrap()) {
      SmallVector<const SCEV *, 4> Ops;
      bool Found = false;
      for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end();
           I != E; ++I) {
        if (!Found)
          if (const SCEV *Q = getSDiv(*I, RHS, SE, IgnoreSignificantBits)) {
            Ops.push_back(Q);
            Found = true;
            continue;
          }
        Ops.push_back(*I);
      }
      return Found ? SE.getMulExpr(Ops) : 0;
    }

  // Otherwise we don't know.
  return 0;
}

/// ExtractImmediate - If S involves the addition of a constant integer value,
/// return that integer value, and mutate S to point to a new SCEV with that
/// value excluded.
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    if (C->getValue()->getValue().getMinSignedBits() <= 64) {
      S = SE.getIntegerSCEV(0, C->getType());
      return C->getValue()->getSExtValue();
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    S = SE.getAddRecExpr(NewOps, AR->getLoop());
    return Result;
  }
  return 0;
}

/// ExtractSymbol - If S involves the addition of a GlobalValue address,
/// return that symbol, and mutate S to point to a new SCEV with that
/// value excluded.
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
      S = SE.getIntegerSCEV(0, GV->getType());
      return GV;
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
    S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
    S = SE.getAddRecExpr(NewOps, AR->getLoop());
    return Result;
  }
  return 0;
}

/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::prefetch:
    case Intrinsic::x86_sse2_loadu_dq:
    case Intrinsic::x86_sse2_loadu_pd:
    case Intrinsic::x86_sse_loadu_ps:
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      if (II->getOperand(1) == OperandVal)
        isAddress = true;
      break;
    }
  }
  return isAddress;
}

/// getAccessType - Return the type of the memory being accessed.
static const Type *getAccessType(const Instruction *Inst) {
  const Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getOperand(1)->getType();
      break;
    }
  }

  // All pointers have the same requirements, so canonicalize them to an
  // arbitrary pointer type to minimize variation.
  if (const PointerType *PTy = dyn_cast<PointerType>(AccessTy))
    AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
                                PTy->getAddressSpace());

  return AccessTy;
}

/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
static bool
DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
  bool Changed = false;

  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val());

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
    Changed = true;
  }

  return Changed;
}

namespace {

/// Cost - This class is used to measure and compare candidate formulae.
class Cost {
  /// TODO: Some of these could be merged. Also, a lexical ordering
  /// isn't always optimal.
  unsigned NumRegs;
  unsigned AddRecCost;
  unsigned NumIVMuls;
  unsigned NumBaseAdds;
  unsigned ImmCost;
  unsigned SetupCost;

public:
  Cost()
    : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0),
      SetupCost(0) {}

  unsigned getNumRegs() const { return NumRegs; }

  bool operator<(const Cost &Other) const;

  void Loose();

  void RateFormula(const Formula &F,
                   SmallPtrSet<const SCEV *, 16> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs,
                   const Loop *L,
                   const SmallVectorImpl<int64_t> &Offsets,
                   ScalarEvolution &SE, DominatorTree &DT);

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  void RateRegister(const SCEV *Reg,
                    SmallPtrSet<const SCEV *, 16> &Regs,
                    const Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);
  void RatePrimaryRegister(const SCEV *Reg,
                           SmallPtrSet<const SCEV *, 16> &Regs,
                           const Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT);
};

}

/// RateRegister - Tally up interesting quantities from the given register.
void Cost::RateRegister(const SCEV *Reg,
                        SmallPtrSet<const SCEV *, 16> &Regs,
                        const Loop *L,
                        ScalarEvolution &SE, DominatorTree &DT) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    if (AR->getLoop() == L)
      AddRecCost += 1; /// TODO: This should be a function of the stride.

    // If this is an addrec for a loop that's already been visited by LSR,
    // don't second-guess its addrec phi nodes. LSR isn't currently smart
    // enough to reason about more than one loop at a time. Consider these
    // registers free and leave them alone.
    else if (L->contains(AR->getLoop()) ||
             (!AR->getLoop()->contains(L) &&
              DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))) {
      for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin();
           PHINode *PN = dyn_cast<PHINode>(I); ++I)
        if (SE.isSCEVable(PN->getType()) &&
            (SE.getEffectiveSCEVType(PN->getType()) ==
             SE.getEffectiveSCEVType(AR->getType())) &&
            SE.getSCEV(PN) == AR)
          return;

      // If this isn't one of the addrecs that the loop already has, it
      // would require a costly new phi and add. TODO: This isn't
      // precisely modeled right now.
      ++NumBaseAdds;
      if (!Regs.count(AR->getStart()))
        RateRegister(AR->getStart(), Regs, L, SE, DT);
    }

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
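    // The stride itself only needs a register when it isn't a compile-time
    // constant (or when the addrec isn't affine at all); in those cases rate
    // the step operand like any other register.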
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1)))
      if (!Regs.count(AR->getStart()))
        RateRegister(AR->getOperand(1), Regs, L, SE, DT);
  }
  ++NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
  if (!isa<SCEVUnknown>(Reg) &&
      !isa<SCEVConstant>(Reg) &&
      !(isa<SCEVAddRecExpr>(Reg) &&
        (isa<SCEVUnknown>(cast<SCEVAddRecExpr>(Reg)->getStart()) ||
         isa<SCEVConstant>(cast<SCEVAddRecExpr>(Reg)->getStart()))))
    ++SetupCost;
}

/// RatePrimaryRegister - Record this register in the set. If we haven't seen
/// it before, rate it.
void Cost::RatePrimaryRegister(const SCEV *Reg,
                               SmallPtrSet<const SCEV *, 16> &Regs,
                               const Loop *L,
                               ScalarEvolution &SE, DominatorTree &DT) {
  if (Regs.insert(Reg))
    RateRegister(Reg, Regs, L, SE, DT);
}

void Cost::RateFormula(const Formula &F,
                       SmallPtrSet<const SCEV *, 16> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const Loop *L,
                       const SmallVectorImpl<int64_t> &Offsets,
                       ScalarEvolution &SE, DominatorTree &DT) {
  // Tally up the registers.
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(ScaledReg, Regs, L, SE, DT);
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (VisitedRegs.count(BaseReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(BaseReg, Regs, L, SE, DT);

    NumIVMuls += isa<SCEVMulExpr>(BaseReg) &&
                 BaseReg->hasComputableLoopEvolution(L);
  }

  if (F.BaseRegs.size() > 1)
    NumBaseAdds += F.BaseRegs.size() - 1;

  // Tally up the non-zero immediates.
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    int64_t Offset = (uint64_t)*I + F.AM.BaseOffs;
    if (F.AM.BaseGV)
      ImmCost += 64; // Handle symbolic values conservatively.
                     // TODO: This should probably be the pointer size.
    else if (Offset != 0)
      ImmCost += APInt(64, Offset, true).getMinSignedBits();
  }
}

/// Loose - Set this cost to a losing value.
void Cost::Loose() {
  NumRegs = ~0u;
  AddRecCost = ~0u;
  NumIVMuls = ~0u;
  NumBaseAdds = ~0u;
  ImmCost = ~0u;
  SetupCost = ~0u;
}

/// operator< - Choose the lower cost.
bool Cost::operator<(const Cost &Other) const {
  if (NumRegs != Other.NumRegs)
    return NumRegs < Other.NumRegs;
  if (AddRecCost != Other.AddRecCost)
    return AddRecCost < Other.AddRecCost;
  if (NumIVMuls != Other.NumIVMuls)
    return NumIVMuls < Other.NumIVMuls;
  if (NumBaseAdds != Other.NumBaseAdds)
    return NumBaseAdds < Other.NumBaseAdds;
  if (ImmCost != Other.ImmCost)
    return ImmCost < Other.ImmCost;
  if (SetupCost != Other.SetupCost)
    return SetupCost < Other.SetupCost;
  return false;
}

void Cost::print(raw_ostream &OS) const {
  OS << NumRegs << " reg" << (NumRegs == 1 ? "" : "s");
  if (AddRecCost != 0)
    OS << ", with addrec cost " << AddRecCost;
  if (NumIVMuls != 0)
    OS << ", plus " << NumIVMuls << " IV mul" << (NumIVMuls == 1 ? "" : "s");
  if (NumBaseAdds != 0)
    OS << ", plus " << NumBaseAdds << " base add"
       << (NumBaseAdds == 1 ? "" : "s");
  if (ImmCost != 0)
    OS << ", plus " << ImmCost << " imm cost";
  if (SetupCost != 0)
    OS << ", plus " << SetupCost << " setup cost";
}

void Cost::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// LSRFixup - An operand value in an instruction which is to be replaced
/// with some equivalent, possibly strength-reduced, replacement.
struct LSRFixup {
  /// UserInst - The instruction which will be updated.
  Instruction *UserInst;

  /// OperandValToReplace - The operand of the instruction which will
  /// be replaced. The operand may be used more than once; every instance
  /// will be replaced.
  Value *OperandValToReplace;

  /// PostIncLoop - If this user is to use the post-incremented value of an
  /// induction variable, this variable is non-null and holds the loop
  /// associated with the induction variable.
  const Loop *PostIncLoop;

  /// LUIdx - The index of the LSRUse describing the expression which
  /// this fixup needs, minus an offset (below).
  size_t LUIdx;

  /// Offset - A constant offset to be added to the LSRUse expression.
  /// This allows multiple fixups to share the same LSRUse with different
  /// offsets, for example in an unrolled loop.
  int64_t Offset;

  LSRFixup();

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

LSRFixup::LSRFixup()
  : UserInst(0), OperandValToReplace(0), PostIncLoop(0),
    LUIdx(~size_t(0)), Offset(0) {}

void LSRFixup::print(raw_ostream &OS) const {
  OS << "UserInst=";
  // Store is common and interesting enough to be worth special-casing.
  if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) {
    OS << "store ";
    WriteAsOperand(OS, Store->getOperand(0), /*PrintType=*/false);
  } else if (UserInst->getType()->isVoidTy())
    OS << UserInst->getOpcodeName();
  else
    WriteAsOperand(OS, UserInst, /*PrintType=*/false);

  OS << ", OperandValToReplace=";
  WriteAsOperand(OS, OperandValToReplace, /*PrintType=*/false);

  if (PostIncLoop) {
    OS << ", PostIncLoop=";
    WriteAsOperand(OS, PostIncLoop->getHeader(), /*PrintType=*/false);
  }

  if (LUIdx != ~size_t(0))
    OS << ", LUIdx=" << LUIdx;

  if (Offset != 0)
    OS << ", Offset=" << Offset;
}

void LSRFixup::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// UniquifierDenseMapInfo - A DenseMapInfo implementation for holding
/// DenseMaps and DenseSets of sorted SmallVectors of const SCEV*.
struct UniquifierDenseMapInfo {
  static SmallVector<const SCEV *, 2> getEmptyKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-1));
    return V;
  }

  static SmallVector<const SCEV *, 2> getTombstoneKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-2));
    return V;
  }

  static unsigned getHashValue(const SmallVector<const SCEV *, 2> &V) {
    unsigned Result = 0;
    for (SmallVectorImpl<const SCEV *>::const_iterator I = V.begin(),
         E = V.end(); I != E; ++I)
      Result ^= DenseMapInfo<const SCEV *>::getHashValue(*I);
    return Result;
  }

  static bool isEqual(const SmallVector<const SCEV *, 2> &LHS,
                      const SmallVector<const SCEV *, 2> &RHS) {
    return LHS == RHS;
  }
};

/// LSRUse - This class holds the state that LSR keeps for each use in
/// IVUsers, as well as uses invented by LSR itself. It includes information
/// about what kinds of things can be folded into the user, information about
/// the user itself, and information about how the use may be satisfied.
/// TODO: Represent multiple users of the same expression in common?
class LSRUse {
  DenseSet<SmallVector<const SCEV *, 2>, UniquifierDenseMapInfo> Uniquifier;

public:
  /// KindType - An enum for a kind of use, indicating what types of
  /// scaled and immediate operands it might support.
  enum KindType {
    Basic,   ///< A normal use, with no folding.
    Special, ///< A special case of basic, allowing -1 scales.
    Address, ///< An address use; folding according to TargetLowering
    ICmpZero ///< An equality icmp with both operands folded into one.
    // TODO: Add a generic icmp too?
  };

  KindType Kind;
  const Type *AccessTy;

  SmallVector<int64_t, 8> Offsets;
  int64_t MinOffset;
  int64_t MaxOffset;

  /// AllFixupsOutsideLoop - This records whether all of the fixups using this
  /// LSRUse are outside of the loop, in which case some special-case
  /// heuristics may be used.
  bool AllFixupsOutsideLoop;

  /// Formulae - A list of ways to build a value that can satisfy this user.
  /// After the list is populated, one of these is selected heuristically and
  /// used to formulate a replacement for OperandValToReplace in UserInst.
  SmallVector<Formula, 12> Formulae;

  /// Regs - The set of register candidates used by all formulae in this
  /// LSRUse.
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, const Type *T) : Kind(K), AccessTy(T),
                                      MinOffset(INT64_MAX),
                                      MaxOffset(INT64_MIN),
                                      AllFixupsOutsideLoop(true) {}

  bool InsertFormula(size_t LUIdx, const Formula &F);

  void check() const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRUse::InsertFormula(size_t LUIdx, const Formula &F) {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());

  if (!Uniquifier.insert(Key).second)
    return false;

  // Using a register to hold the value of 0 is not profitable.
  assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
         "Zero allocated in a scaled register!");
#ifndef NDEBUG
  for (SmallVectorImpl<const SCEV *>::const_iterator I =
       F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I)
    assert(!(*I)->isZero() && "Zero allocated in a base register!");
#endif

  // Add the formula to the list.
  Formulae.push_back(F);

  // Record registers now being used by this use.
  if (F.ScaledReg) Regs.insert(F.ScaledReg);
  Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());

  return true;
}

void LSRUse::print(raw_ostream &OS) const {
  OS << "LSR Use: Kind=";
  switch (Kind) {
  case Basic:    OS << "Basic"; break;
  case Special:  OS << "Special"; break;
  case ICmpZero: OS << "ICmpZero"; break;
  case Address:
    OS << "Address of ";
    if (isa<PointerType>(AccessTy))
      OS << "pointer"; // the full pointer type could be really verbose
    else
      OS << *AccessTy;
  }

  OS << ", Offsets={";
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    OS << *I;
    if (next(I) != E)
      OS << ',';
  }
  OS << '}';

  if (AllFixupsOutsideLoop)
    OS << ", all-fixups-outside-loop";
}

void LSRUse::dump() const {
  print(errs()); errs() << '\n';
}

/// isLegalUse - Test whether the use described by AM is "legal", meaning it
/// can be completely folded into the user instruction at isel time. This
/// includes address-mode folding and special icmp tricks.
static bool isLegalUse(const TargetLowering::AddrMode &AM,
                       LSRUse::KindType Kind, const Type *AccessTy,
                       const TargetLowering *TLI) {
  switch (Kind) {
  case LSRUse::Address:
    // If we have low-level target information, ask the target if it can
    // completely fold this address.
    if (TLI) return TLI->isLegalAddressingMode(AM, AccessTy);

    // Otherwise, just guess that reg+reg addressing is legal.
    return !AM.BaseGV && AM.BaseOffs == 0 && AM.Scale <= 1;

  case LSRUse::ICmpZero:
    // There's not even a target hook for querying whether it would be legal
    // to fold a GV into an ICmp.
    if (AM.BaseGV)
      return false;

    // ICmp only has two operands; don't allow more than two non-trivial parts.
    if (AM.Scale != 0 && AM.HasBaseReg && AM.BaseOffs != 0)
      return false;

    // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale
    // by putting the scaled register in the other operand of the icmp.
    if (AM.Scale != 0 && AM.Scale != -1)
      return false;

    // If we have low-level target information, ask the target if it can fold
    // an integer immediate on an icmp.
    if (AM.BaseOffs != 0) {
      if (TLI) return TLI->isLegalICmpImmediate(-AM.BaseOffs);
      return false;
    }

    return true;

  case LSRUse::Basic:
    // Only handle single-register values.
    return !AM.BaseGV && AM.Scale == 0 && AM.BaseOffs == 0;

  case LSRUse::Special:
    // Only handle -1 scales, or no scale.
    return AM.Scale == 0 || AM.Scale == -1;
  }

  return false;
}

static bool isLegalUse(TargetLowering::AddrMode AM,
                       int64_t MinOffset, int64_t MaxOffset,
                       LSRUse::KindType Kind, const Type *AccessTy,
                       const TargetLowering *TLI) {
  // Check for overflow.
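  // The fixup offsets are added to AM.BaseOffs below. Both are signed 64-bit
  // values, so the sum can wrap; detect that by checking whether the sum
  // moved in the direction implied by the offset's sign.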
  if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) !=
      (MinOffset > 0))
    return false;
  AM.BaseOffs = (uint64_t)AM.BaseOffs + MinOffset;
  if (isLegalUse(AM, Kind, AccessTy, TLI)) {
    AM.BaseOffs = (uint64_t)AM.BaseOffs - MinOffset;
    // Check for overflow.
    if (((int64_t)((uint64_t)AM.BaseOffs + MaxOffset) > AM.BaseOffs) !=
        (MaxOffset > 0))
      return false;
    AM.BaseOffs = (uint64_t)AM.BaseOffs + MaxOffset;
    return isLegalUse(AM, Kind, AccessTy, TLI);
  }
  return false;
}

static bool isAlwaysFoldable(int64_t BaseOffs,
                             GlobalValue *BaseGV,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, const Type *AccessTy,
                             const TargetLowering *TLI,
                             ScalarEvolution &SE) {
  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isLegalUse(AM, Kind, AccessTy, TLI);
}

static bool isAlwaysFoldable(const SCEV *S,
                             int64_t MinOffset, int64_t MaxOffset,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, const Type *AccessTy,
                             const TargetLowering *TLI,
                             ScalarEvolution &SE) {
  // Fast-path: zero is always foldable.
  if (S->isZero()) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t BaseOffs = ExtractImmediate(S, SE);
  GlobalValue *BaseGV = ExtractSymbol(S, SE);

  // If there's anything else involved, it's not foldable.
  if (!S->isZero()) return false;

  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI);
}

/// FormulaSorter - This class implements an ordering for formulae which sorts
/// them by their standalone cost.
class FormulaSorter {
  /// These two sets are kept empty, so that we compute standalone costs.
  DenseSet<const SCEV *> VisitedRegs;
  SmallPtrSet<const SCEV *, 16> Regs;
  Loop *L;
  LSRUse *LU;
  ScalarEvolution &SE;
  DominatorTree &DT;

public:
  FormulaSorter(Loop *l, LSRUse &lu, ScalarEvolution &se, DominatorTree &dt)
    : L(l), LU(&lu), SE(se), DT(dt) {}

  bool operator()(const Formula &A, const Formula &B) {
    Cost CostA;
    CostA.RateFormula(A, Regs, VisitedRegs, L, LU->Offsets, SE, DT);
    Regs.clear();
    Cost CostB;
    CostB.RateFormula(B, Regs, VisitedRegs, L, LU->Offsets, SE, DT);
    Regs.clear();
    return CostA < CostB;
  }
};

/// LSRInstance - This class holds state for the main loop strength reduction
/// logic.
class LSRInstance {
  IVUsers &IU;
  ScalarEvolution &SE;
  DominatorTree &DT;
  const TargetLowering *const TLI;
  Loop *const L;
  bool Changed;

  /// IVIncInsertPos - This is the insert position where the current loop's
  /// induction variable increment should be placed. In simple loops, this is
  /// the latch block's terminator. But in more complicated cases, this is a
  /// position which will dominate all the in-loop post-increment users.
  Instruction *IVIncInsertPos;

  /// Factors - Interesting factors between use strides.
  SmallSetVector<int64_t, 8> Factors;

  /// Types - Interesting use types, to facilitate truncation reuse.
  SmallSetVector<const Type *, 4> Types;

  /// Fixups - The list of operands which are to be replaced.
  SmallVector<LSRFixup, 16> Fixups;

  /// Uses - The list of interesting uses.
  SmallVector<LSRUse, 16> Uses;

  /// RegUses - Track which uses use which register candidates.
  RegUseTracker RegUses;

  void OptimizeShadowIV();
  bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
  ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
  bool OptimizeLoopTermCond();

  void CollectInterestingTypesAndFactors();
  void CollectFixupsAndInitialFormulae();

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());
    return Fixups.back();
  }

  // Support for sharing of LSRUses between LSRFixups.
  typedef DenseMap<const SCEV *, size_t> UseMapTy;
  UseMapTy UseMap;

  bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
                          LSRUse::KindType Kind, const Type *AccessTy);

  std::pair<size_t, int64_t> getUse(const SCEV *&Expr,
                                    LSRUse::KindType Kind,
                                    const Type *AccessTy);

public:
  void InsertInitialFormula(const SCEV *S, Loop *L, LSRUse &LU, size_t LUIdx);
  void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void CountRegisters(const Formula &F, size_t LUIdx);
  bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);

  void CollectLoopInvariantFixupsAndFormulae();

  void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
                              unsigned Depth = 0);
  void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateCrossUseConstantOffsets();
  void GenerateAllReuseFormulae();

  void FilterOutUndesirableDedicatedRegisters();
  void NarrowSearchSpaceUsingHeuristics();

  void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                    Cost &SolutionCost,
                    SmallVectorImpl<const Formula *> &Workspace,
                    const Cost &CurCost,
                    const SmallPtrSet<const SCEV *, 16> &CurRegs,
                    DenseSet<const SCEV *> &VisitedRegs) const;
  void Solve(SmallVectorImpl<const Formula *> &Solution) const;

  Value *Expand(const LSRFixup &LF,
                const Formula &F,
                BasicBlock::iterator IP, Loop *L, Instruction *IVIncInsertPos,
                SCEVExpander &Rewriter,
                SmallVectorImpl<WeakVH> &DeadInsts,
                ScalarEvolution &SE, DominatorTree &DT) const;
  void Rewrite(const LSRFixup &LF,
               const Formula &F,
               Loop *L, Instruction *IVIncInsertPos,
               SCEVExpander &Rewriter,
               SmallVectorImpl<WeakVH> &DeadInsts,
               ScalarEvolution &SE, DominatorTree &DT,
               Pass *P) const;
  void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                         Pass *P);

  LSRInstance(const TargetLowering *tli, Loop *l, Pass *P);

  bool getChanged() const { return Changed; }

  void print_factors_and_types(raw_ostream &OS) const;
  void print_fixups(raw_ostream &OS) const;
  void print_uses(raw_ostream &OS) const;
  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// OptimizeShadowIV - If IV is used in an int-to-float cast
/// inside the loop then try to eliminate the cast operation.
void LSRInstance::OptimizeShadowIV() {
  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;

  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
       UI != E; /* empty */) {
    IVUsers::const_iterator CandidateUI = UI;
    ++UI;
    Instruction *ShadowUse = CandidateUI->getUser();
    const Type *DestTy = NULL;

    /* If shadow use is an int->float cast then insert a second IV
       to eliminate this cast.

         for (unsigned i = 0; i < n; ++i)
           foo((double)i);

       is transformed into

         double d = 0.0;
         for (unsigned i = 0; i < n; ++i, ++d)
           foo(d);
    */
    if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser()))
      DestTy = UCast->getDestTy();
    else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser()))
      DestTy = SCast->getDestTy();
    if (!DestTy) continue;

    if (TLI) {
      // If target does not support DestTy natively then do not apply
      // this transformation.
      EVT DVT = TLI->getValueType(DestTy);
      if (!TLI->isTypeLegal(DVT)) continue;
    }

    PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
    if (!PH) continue;
    if (PH->getNumIncomingValues() != 2) continue;

    const Type *SrcTy = PH->getType();
    int Mantissa = DestTy->getFPMantissaWidth();
    if (Mantissa == -1) continue;
    if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
      continue;

    unsigned Entry, Latch;
    if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
      Entry = 0;
      Latch = 1;
    } else {
      Entry = 1;
      Latch = 0;
    }

    ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
    if (!Init) continue;
    Constant *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());

    BinaryOperator *Incr =
      dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
    if (!Incr) continue;
    if (Incr->getOpcode() != Instruction::Add
        && Incr->getOpcode() != Instruction::Sub)
      continue;

    /* Initialize new IV, double d = 0.0 in above example. */
    ConstantInt *C = NULL;
    if (Incr->getOperand(0) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(1));
    else if (Incr->getOperand(1) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(0));
    else
      continue;

    if (!C) continue;

    // Ignore negative constants, as the code below doesn't handle them
    // correctly. TODO: Remove this restriction.
    if (!C->getValue().isStrictlyPositive()) continue;

    /* Add new PHINode. */
    PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);

    /* create new increment. '++d' in above example. */
    Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
    BinaryOperator *NewIncr =
      BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
                               Instruction::FAdd : Instruction::FSub,
                             NewPH, CFP, "IV.S.next.", Incr);

    NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
    NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));

    /* Remove cast operation */
    ShadowUse->replaceAllUsesWith(NewPH);
    ShadowUse->eraseFromParent();
    break;
  }
}

/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
/// set the IV user and stride information and return true, otherwise return
/// false.
bool LSRInstance::FindIVUserForCond(ICmpInst *Cond,
                                    IVStrideUse *&CondUse) {
  for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
    if (UI->getUser() == Cond) {
      // NOTE: we could handle setcc instructions with multiple uses here, but
      // InstCombine does it as well for simple uses, it's not clear that it
      // occurs enough in real life to handle.
      CondUse = UI;
      return true;
    }
  return false;
}

/// OptimizeMax - Rewrite the loop's terminating condition if it uses
/// a max computation.
///
/// This is a narrow solution to a specific, but acute, problem. For loops
/// like this:
///
///   i = 0;
///   do {
///     p[i] = 0.0;
///   } while (++i < n);
///
/// the trip count isn't just 'n', because 'n' might not be positive. And
/// unfortunately this can come up even for loops where the user didn't use
/// a C do-while loop. For example, seemingly well-behaved top-test loops
/// will commonly be lowered like this:
///
///   if (n > 0) {
///     i = 0;
///     do {
///       p[i] = 0.0;
///     } while (++i < n);
///   }
///
/// and then it's possible for subsequent optimization to obscure the if
/// test in such a way that indvars can't find it.
///
/// When indvars can't find the if test in loops like this, it creates a
/// max expression, which allows it to give the loop a canonical
/// induction variable:
///
///   i = 0;
///   max = n < 1 ? 1 : n;
///   do {
///     p[i] = 0.0;
///   } while (++i != max);
///
/// Canonical induction variables are necessary because the loop passes
/// are designed around them. The most obvious example of this is the
/// LoopInfo analysis, which doesn't remember trip count values. It
/// expects to be able to rediscover the trip count each time it is
/// needed, and it does this using a simple analysis that only succeeds if
/// the loop has a canonical induction variable.
///
/// However, when it comes time to generate code, the maximum operation
/// can be quite costly, especially if it's inside of an outer loop.
///
/// This function solves this problem by detecting such loops and rewriting
/// their conditions from ICMP_NE back to ICMP_SLT, and deleting the
/// instructions for the maximum computation.
///
ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
  // Check that the loop matches the pattern we're looking for.
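  // The latch compare must be an equality test whose right operand is the
  // single-use select produced by the max expansion; anything else is left
  // untouched.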
  if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
      Cond->getPredicate() != CmpInst::ICMP_NE)
    return Cond;

  SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
  if (!Sel || !Sel->hasOneUse()) return Cond;

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return Cond;
  const SCEV *One = SE.getIntegerSCEV(1, BackedgeTakenCount->getType());

  // Add one to the backedge-taken count to get the trip count.
  const SCEV *IterationCount = SE.getAddExpr(BackedgeTakenCount, One);

  // Check for a max calculation that matches the pattern.
  if (!isa<SCEVSMaxExpr>(IterationCount) && !isa<SCEVUMaxExpr>(IterationCount))
    return Cond;
  const SCEVNAryExpr *Max = cast<SCEVNAryExpr>(IterationCount);
  if (Max != SE.getSCEV(Sel)) return Cond;

  // To handle a max with more than two operands, this optimization would
  // require additional checking and setup.
  if (Max->getNumOperands() != 2)
    return Cond;

  const SCEV *MaxLHS = Max->getOperand(0);
  const SCEV *MaxRHS = Max->getOperand(1);
  if (!MaxLHS || MaxLHS != One) return Cond;
  // Check the relevant induction variable for conformance to
  // the pattern.
  const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
  if (!AR || !AR->isAffine() ||
      AR->getStart() != One ||
      AR->getStepRecurrence(SE) != One)
    return Cond;

  assert(AR->getLoop() == L &&
         "Loop condition operand is an addrec in a different loop!");

  // Check the right operand of the select, and remember it, as it will
  // be used in the new comparison instruction.
  Value *NewRHS = 0;
  if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
    NewRHS = Sel->getOperand(1);
  else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
    NewRHS = Sel->getOperand(2);
  if (!NewRHS) return Cond;

  // Determine the new comparison opcode. It may be signed or unsigned,
  // and the original comparison may be either equality or inequality.
  CmpInst::Predicate Pred =
    isa<SCEVSMaxExpr>(Max) ? CmpInst::ICMP_SLT : CmpInst::ICMP_ULT;
  if (Cond->getPredicate() == CmpInst::ICMP_EQ)
    Pred = CmpInst::getInversePredicate(Pred);

  // Ok, everything looks ok to change the condition into an SLT or SGE and
  // delete the max calculation.
  ICmpInst *NewCond =
    new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");

  // Delete the max calculation instructions.
  Cond->replaceAllUsesWith(NewCond);
  CondUse->setUser(NewCond);
  Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
  Cond->eraseFromParent();
  Sel->eraseFromParent();
  if (Cmp->use_empty())
    Cmp->eraseFromParent();
  return NewCond;
}

/// OptimizeLoopTermCond - Change loop terminating condition to use the
/// postinc iv when possible.
bool
LSRInstance::OptimizeLoopTermCond() {
  SmallPtrSet<Instruction *, 4> PostIncs;

  BasicBlock *LatchBlock = L->getLoopLatch();
  SmallVector<BasicBlock*, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitingBlock = ExitingBlocks[i];

    // Get the terminating condition for the loop if possible. If we
    // can, we want to change it to use a post-incremented version of its
    // induction variable, to allow coalescing the live ranges for the IV into
    // one register value.

    BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
    if (!TermBr)
      continue;
    // FIXME: Overly conservative, termination condition could be an 'or' etc.
    if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
      continue;

    // Search IVUsesByStride to find Cond's IVUse if there is one.
    IVStrideUse *CondUse = 0;
    ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
    if (!FindIVUserForCond(Cond, CondUse))
      continue;

    // If the trip count is computed in terms of a max (due to ScalarEvolution
    // being unable to find a sufficient guard, for example), change the loop
    // comparison to use SLT or ULT instead of NE.
    // One consequence of doing this now is that it disrupts the count-down
    // optimization. That's not always a bad thing though, because in such
    // cases it may still be worthwhile to avoid a max.
    Cond = OptimizeMax(Cond, CondUse);

    // If this exiting block dominates the latch block, it may also use
    // the post-inc value if it won't be shared with other uses.
    // Check for dominance.
    if (!DT.dominates(ExitingBlock, LatchBlock))
      continue;

    // Conservatively avoid trying to use the post-inc value in non-latch
    // exits if there may be pre-inc users in intervening blocks.
    if (LatchBlock != ExitingBlock)
      for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
        // Test if the use is reachable from the exiting block. This dominator
        // query is a conservative approximation of reachability.
        if (&*UI != CondUse &&
            !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) {
          // Conservatively assume there may be reuse if the quotient of their
          // strides could be a legal scale.
          const SCEV *A = CondUse->getStride();
          const SCEV *B = UI->getStride();
          if (SE.getTypeSizeInBits(A->getType()) !=
              SE.getTypeSizeInBits(B->getType())) {
            if (SE.getTypeSizeInBits(A->getType()) >
                SE.getTypeSizeInBits(B->getType()))
              B = SE.getSignExtendExpr(B, A->getType());
            else
              A = SE.getSignExtendExpr(A, B->getType());
          }
          if (const SCEVConstant *D =
                dyn_cast_or_null<SCEVConstant>(getSDiv(B, A, SE))) {
            // Stride of one or negative one can have reuse with non-addresses.
            if (D->getValue()->isOne() ||
                D->getValue()->isAllOnesValue())
              goto decline_post_inc;
            // Avoid weird situations.
            if (D->getValue()->getValue().getMinSignedBits() >= 64 ||
                D->getValue()->getValue().isMinSignedValue())
              goto decline_post_inc;
            // Check for possible scaled-address reuse.
            const Type *AccessTy = getAccessType(UI->getUser());
            TargetLowering::AddrMode AM;
            AM.Scale = D->getValue()->getSExtValue();
            if (TLI->isLegalAddressingMode(AM, AccessTy))
              goto decline_post_inc;
            AM.Scale = -AM.Scale;
            if (TLI->isLegalAddressingMode(AM, AccessTy))
              goto decline_post_inc;
          }
        }

    DEBUG(dbgs() << "  Change loop exiting icmp to use postinc iv: "
                 << *Cond << '\n');

    // It's possible for the setcc instruction to be anywhere in the loop, and
    // possible for it to have multiple users. If it is not immediately before
    // the exiting block branch, move it.
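    // When the compare has additional users, moving it would change its
    // position relative to them, so the code below instead clones it for the
    // branch and leaves the original in place.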
    if (&*++BasicBlock::iterator(Cond) != TermBr) {
      if (Cond->hasOneUse()) {
        Cond->moveBefore(TermBr);
      } else {
        // Clone the terminating condition and insert into the loop end.
        ICmpInst *OldCond = Cond;
        Cond = cast<ICmpInst>(Cond->clone());
        Cond->setName(L->getHeader()->getName() + ".termcond");
        ExitingBlock->getInstList().insert(TermBr, Cond);

        // Clone the IVUse, as the old use still exists!
        CondUse = &IU.AddUser(CondUse->getStride(), CondUse->getOffset(),
                              Cond, CondUse->getOperandValToReplace());
        TermBr->replaceUsesOfWith(OldCond, Cond);
      }
    }

    // If we get to here, we know that we can transform the setcc instruction
    // to use the post-incremented version of the IV, allowing us to coalesce
    // the live ranges for the IV correctly.
    CondUse->setOffset(SE.getMinusSCEV(CondUse->getOffset(),
                                       CondUse->getStride()));
    CondUse->setIsUseOfPostIncrementedValue(true);
    Changed = true;

    PostIncs.insert(Cond);
  decline_post_inc:;
  }

  // Determine an insertion point for the loop induction variable increment.
  // It must dominate all the post-inc comparisons we just set up, and it must
  // dominate the loop latch edge.
  IVIncInsertPos = L->getLoopLatch()->getTerminator();
  for (SmallPtrSet<Instruction *, 4>::const_iterator I = PostIncs.begin(),
       E = PostIncs.end(); I != E; ++I) {
    BasicBlock *BB =
      DT.findNearestCommonDominator(IVIncInsertPos->getParent(),
                                    (*I)->getParent());
    if (BB == (*I)->getParent())
      IVIncInsertPos = *I;
    else if (BB != IVIncInsertPos->getParent())
      IVIncInsertPos = BB->getTerminator();
  }

  return Changed;
}

bool
LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
                                LSRUse::KindType Kind, const Type *AccessTy) {
  int64_t NewMinOffset = LU.MinOffset;
  int64_t NewMaxOffset = LU.MaxOffset;
  const Type *NewAccessTy = AccessTy;

  // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
  // something conservative, however this can pessimize in the case that one of
  // the uses will have all its uses outside the loop, for example.
  if (LU.Kind != Kind)
    return false;
  // Conservatively assume HasBaseReg is true for now.
  if (NewOffset < LU.MinOffset) {
    if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, /*HasBaseReg=*/true,
                          Kind, AccessTy, TLI, SE))
      return false;
    NewMinOffset = NewOffset;
  } else if (NewOffset > LU.MaxOffset) {
    if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, /*HasBaseReg=*/true,
                          Kind, AccessTy, TLI, SE))
      return false;
    NewMaxOffset = NewOffset;
  }
  // Check for a mismatched access type, and fall back conservatively as
  // needed.
  if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
    NewAccessTy = Type::getVoidTy(AccessTy->getContext());

  // Update the use.
  LU.MinOffset = NewMinOffset;
  LU.MaxOffset = NewMaxOffset;
  LU.AccessTy = NewAccessTy;
  if (NewOffset != LU.Offsets.back())
    LU.Offsets.push_back(NewOffset);
  return true;
}

/// getUse - Return an LSRUse index and an offset value for a fixup which
/// needs the given expression, with the given kind and optional access type.
/// Either reuse an existing use or create a new one, as needed.
1639 std::pair<size_t, int64_t> 1640 LSRInstance::getUse(const SCEV *&Expr, 1641 LSRUse::KindType Kind, const Type *AccessTy) { 1642 const SCEV *Copy = Expr; 1643 int64_t Offset = ExtractImmediate(Expr, SE); 1644 1645 // Basic uses can't accept any offset, for example. 1646 if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true, 1647 Kind, AccessTy, TLI, SE)) { 1648 Expr = Copy; 1649 Offset = 0; 1650 } 1651 1652 std::pair<UseMapTy::iterator, bool> P = 1653 UseMap.insert(std::make_pair(Expr, 0)); 1654 if (!P.second) { 1655 // A use already existed with this base. 1656 size_t LUIdx = P.first->second; 1657 LSRUse &LU = Uses[LUIdx]; 1658 if (reconcileNewOffset(LU, Offset, Kind, AccessTy)) 1659 // Reuse this use. 1660 return std::make_pair(LUIdx, Offset); 1661 } 1662 1663 // Create a new use. 1664 size_t LUIdx = Uses.size(); 1665 P.first->second = LUIdx; 1666 Uses.push_back(LSRUse(Kind, AccessTy)); 1667 LSRUse &LU = Uses[LUIdx]; 1668 1669 // We don't need to track redundant offsets, but we don't need to go out 1670 // of our way here to avoid them. 1671 if (LU.Offsets.empty() || Offset != LU.Offsets.back()) 1672 LU.Offsets.push_back(Offset); 1673 1674 LU.MinOffset = Offset; 1675 LU.MaxOffset = Offset; 1676 return std::make_pair(LUIdx, Offset); 1677 } 1678 1679 void LSRInstance::CollectInterestingTypesAndFactors() { 1680 SmallSetVector<const SCEV *, 4> Strides; 1681 1682 // Collect interesting types and factors. 1683 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) { 1684 const SCEV *Stride = UI->getStride(); 1685 1686 // Collect interesting types. 1687 Types.insert(SE.getEffectiveSCEVType(Stride->getType())); 1688 1689 // Collect interesting factors. 1690 for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter = 1691 Strides.begin(), SEnd = Strides.end(); NewStrideIter != SEnd; 1692 ++NewStrideIter) { 1693 const SCEV *OldStride = Stride; 1694 const SCEV *NewStride = *NewStrideIter; 1695 if (OldStride == NewStride) 1696 continue; 1697 1698 if (SE.getTypeSizeInBits(OldStride->getType()) != 1699 SE.getTypeSizeInBits(NewStride->getType())) { 1700 if (SE.getTypeSizeInBits(OldStride->getType()) > 1701 SE.getTypeSizeInBits(NewStride->getType())) 1702 NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType()); 1703 else 1704 OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType()); 1705 } 1706 if (const SCEVConstant *Factor = 1707 dyn_cast_or_null<SCEVConstant>(getSDiv(NewStride, OldStride, 1708 SE, true))) { 1709 if (Factor->getValue()->getValue().getMinSignedBits() <= 64) 1710 Factors.insert(Factor->getValue()->getValue().getSExtValue()); 1711 } else if (const SCEVConstant *Factor = 1712 dyn_cast_or_null<SCEVConstant>(getSDiv(OldStride, NewStride, 1713 SE, true))) { 1714 if (Factor->getValue()->getValue().getMinSignedBits() <= 64) 1715 Factors.insert(Factor->getValue()->getValue().getSExtValue()); 1716 } 1717 } 1718 Strides.insert(Stride); 1719 } 1720 1721 // If all uses use the same type, don't bother looking for truncation-based 1722 // reuse. 1723 if (Types.size() == 1) 1724 Types.clear(); 1725 1726 DEBUG(print_factors_and_types(dbgs())); 1727 } 1728 1729 void LSRInstance::CollectFixupsAndInitialFormulae() { 1730 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) { 1731 // Record the uses. 
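    // Each interesting IV use becomes an LSRFixup recording the user
    // instruction, the operand that will be rewritten, and, for
    // post-increment users, the loop whose increment they follow.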
1732 LSRFixup &LF = getNewFixup(); 1733 LF.UserInst = UI->getUser(); 1734 LF.OperandValToReplace = UI->getOperandValToReplace(); 1735 if (UI->isUseOfPostIncrementedValue()) 1736 LF.PostIncLoop = L; 1737 1738 LSRUse::KindType Kind = LSRUse::Basic; 1739 const Type *AccessTy = 0; 1740 if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) { 1741 Kind = LSRUse::Address; 1742 AccessTy = getAccessType(LF.UserInst); 1743 } 1744 1745 const SCEV *S = IU.getCanonicalExpr(*UI); 1746 1747 // Equality (== and !=) ICmps are special. We can rewrite (i == N) as 1748 // (N - i == 0), and this allows (N - i) to be the expression that we work 1749 // with rather than just N or i, so we can consider the register 1750 // requirements for both N and i at the same time. Limiting this code to 1751 // equality icmps is not a problem because all interesting loops use 1752 // equality icmps, thanks to IndVarSimplify. 1753 if (ICmpInst *CI = dyn_cast<ICmpInst>(LF.UserInst)) 1754 if (CI->isEquality()) { 1755 // Swap the operands if needed to put the OperandValToReplace on the 1756 // left, for consistency. 1757 Value *NV = CI->getOperand(1); 1758 if (NV == LF.OperandValToReplace) { 1759 CI->setOperand(1, CI->getOperand(0)); 1760 CI->setOperand(0, NV); 1761 } 1762 1763 // x == y --> x - y == 0 1764 const SCEV *N = SE.getSCEV(NV); 1765 if (N->isLoopInvariant(L)) { 1766 Kind = LSRUse::ICmpZero; 1767 S = SE.getMinusSCEV(N, S); 1768 } 1769 1770 // -1 and the negations of all interesting strides (except the negation 1771 // of -1) are now also interesting. 1772 for (size_t i = 0, e = Factors.size(); i != e; ++i) 1773 if (Factors[i] != -1) 1774 Factors.insert(-(uint64_t)Factors[i]); 1775 Factors.insert(-1); 1776 } 1777 1778 // Set up the initial formula for this use. 1779 std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy); 1780 LF.LUIdx = P.first; 1781 LF.Offset = P.second; 1782 LSRUse &LU = Uses[LF.LUIdx]; 1783 LU.AllFixupsOutsideLoop &= !L->contains(LF.UserInst); 1784 1785 // If this is the first use of this LSRUse, give it a formula. 1786 if (LU.Formulae.empty()) { 1787 InsertInitialFormula(S, L, LU, LF.LUIdx); 1788 CountRegisters(LU.Formulae.back(), LF.LUIdx); 1789 } 1790 } 1791 1792 DEBUG(print_fixups(dbgs())); 1793 } 1794 1795 void 1796 LSRInstance::InsertInitialFormula(const SCEV *S, Loop *L, 1797 LSRUse &LU, size_t LUIdx) { 1798 Formula F; 1799 F.InitialMatch(S, L, SE, DT); 1800 bool Inserted = InsertFormula(LU, LUIdx, F); 1801 assert(Inserted && "Initial formula already exists!"); (void)Inserted; 1802 } 1803 1804 void 1805 LSRInstance::InsertSupplementalFormula(const SCEV *S, 1806 LSRUse &LU, size_t LUIdx) { 1807 Formula F; 1808 F.BaseRegs.push_back(S); 1809 F.AM.HasBaseReg = true; 1810 bool Inserted = InsertFormula(LU, LUIdx, F); 1811 assert(Inserted && "Supplemental formula already exists!"); (void)Inserted; 1812 } 1813 1814 /// CountRegisters - Note which registers are used by the given formula, 1815 /// updating RegUses. 1816 void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) { 1817 if (F.ScaledReg) 1818 RegUses.CountRegister(F.ScaledReg, LUIdx); 1819 for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(), 1820 E = F.BaseRegs.end(); I != E; ++I) 1821 RegUses.CountRegister(*I, LUIdx); 1822 } 1823 1824 /// InsertFormula - If the given formula has not yet been inserted, add it to 1825 /// the list, and return true. Return false otherwise. 
bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
  if (!LU.InsertFormula(LUIdx, F))
    return false;

  CountRegisters(F, LUIdx);
  return true;
}

/// CollectLoopInvariantFixupsAndFormulae - Check for other uses of
/// loop-invariant values which we're tracking. These other uses will pin these
/// values in registers, making them less profitable for elimination.
/// TODO: This currently misses non-constant addrec step registers.
/// TODO: Should this give more weight to users inside the loop?
void
LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
  SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end());
  SmallPtrSet<const SCEV *, 8> Inserted;

  while (!Worklist.empty()) {
    const SCEV *S = Worklist.pop_back_val();

    if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
      Worklist.insert(Worklist.end(), N->op_begin(), N->op_end());
    else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
      Worklist.push_back(C->getOperand());
    else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      Worklist.push_back(D->getLHS());
      Worklist.push_back(D->getRHS());
    } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
      if (!Inserted.insert(U)) continue;
      const Value *V = U->getValue();
      if (const Instruction *Inst = dyn_cast<Instruction>(V))
        if (L->contains(Inst)) continue;
      for (Value::use_const_iterator UI = V->use_begin(), UE = V->use_end();
           UI != UE; ++UI) {
        const Instruction *UserInst = dyn_cast<Instruction>(*UI);
        // Ignore non-instructions.
        if (!UserInst)
          continue;
        // Ignore instructions in other functions (as can happen with
        // Constants).
        if (UserInst->getParent()->getParent() != L->getHeader()->getParent())
          continue;
        // Ignore instructions not dominated by the loop.
        const BasicBlock *UseBB = !isa<PHINode>(UserInst) ?
          UserInst->getParent() :
          cast<PHINode>(UserInst)->getIncomingBlock(
            PHINode::getIncomingValueNumForOperand(UI.getOperandNo()));
        if (!DT.dominates(L->getHeader(), UseBB))
          continue;
        // Ignore uses which are part of other SCEV expressions, to avoid
        // analyzing them multiple times.
        if (SE.isSCEVable(UserInst->getType()) &&
            !isa<SCEVUnknown>(SE.getSCEV(const_cast<Instruction *>(UserInst))))
          continue;
        // Ignore icmp instructions which are already being analyzed.
        if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
          unsigned OtherIdx = !UI.getOperandNo();
          Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
          if (SE.getSCEV(OtherOp)->hasComputableLoopEvolution(L))
            continue;
        }

        LSRFixup &LF = getNewFixup();
        LF.UserInst = const_cast<Instruction *>(UserInst);
        LF.OperandValToReplace = UI.getUse();
        std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, 0);
        LF.LUIdx = P.first;
        LF.Offset = P.second;
        LSRUse &LU = Uses[LF.LUIdx];
        LU.AllFixupsOutsideLoop &= !L->contains(LF.UserInst);
        InsertSupplementalFormula(U, LU, LF.LUIdx);
        CountRegisters(LU.Formulae.back(), Uses.size() - 1);
        break;
      }
    }
  }
}

/// CollectSubexprs - Split S into subexpressions which can be pulled out into
/// separate registers. If C is non-null, multiply each subexpression by C.
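/// For example, {(4 + %a),+,8}<%L> is split into {0,+,8}<%L>, 4, and %a
/// (with %a and %L standing in for an arbitrary value and loop).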
static void CollectSubexprs(const SCEV *S, const SCEVConstant *C,
                            SmallVectorImpl<const SCEV *> &Ops,
                            ScalarEvolution &SE) {
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    // Break out add operands.
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      CollectSubexprs(*I, C, Ops, SE);
    return;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    // Split a non-zero base out of an addrec.
    if (!AR->getStart()->isZero()) {
      CollectSubexprs(SE.getAddRecExpr(SE.getIntegerSCEV(0, AR->getType()),
                                       AR->getStepRecurrence(SE),
                                       AR->getLoop()), C, Ops, SE);
      CollectSubexprs(AR->getStart(), C, Ops, SE);
      return;
    }
  } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    // Break (C * (a + b + c)) into C*a + C*b + C*c.
    if (Mul->getNumOperands() == 2)
      if (const SCEVConstant *Op0 =
            dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
        CollectSubexprs(Mul->getOperand(1),
                        C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0,
                        Ops, SE);
        return;
      }
  }

  // Otherwise use the value itself.
  Ops.push_back(C ? SE.getMulExpr(C, S) : S);
}

/// GenerateReassociations - Split out subexpressions from adds and the bases of
/// addrecs.
void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
                                         Formula Base,
                                         unsigned Depth) {
  // Arbitrarily cap recursion to protect compile time.
  if (Depth >= 3) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *BaseReg = Base.BaseRegs[i];

    SmallVector<const SCEV *, 8> AddOps;
    CollectSubexprs(BaseReg, 0, AddOps, SE);
    if (AddOps.size() == 1) continue;

    for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
         JE = AddOps.end(); J != JE; ++J) {
      // Don't pull a constant into a register if the constant could be folded
      // into an immediate field.
      if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset,
                           Base.getNumRegs() > 1,
                           LU.Kind, LU.AccessTy, TLI, SE))
        continue;

      // Collect all operands except *J.
      SmallVector<const SCEV *, 8> InnerAddOps;
      for (SmallVectorImpl<const SCEV *>::const_iterator K = AddOps.begin(),
           KE = AddOps.end(); K != KE; ++K)
        if (K != J)
          InnerAddOps.push_back(*K);

      // Don't leave just a constant behind in a register if the constant could
      // be folded into an immediate field.
      if (InnerAddOps.size() == 1 &&
          isAlwaysFoldable(InnerAddOps[0], LU.MinOffset, LU.MaxOffset,
                           Base.getNumRegs() > 1,
                           LU.Kind, LU.AccessTy, TLI, SE))
        continue;

      Formula F = Base;
      F.BaseRegs[i] = SE.getAddExpr(InnerAddOps);
      F.BaseRegs.push_back(*J);
      if (InsertFormula(LU, LUIdx, F))
        // If that formula hadn't been seen before, recurse to find more like
        // it.
        GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth+1);
    }
  }
}

/// GenerateCombinations - Generate a formula consisting of all of the
/// loop-dominating registers added into a single register.
void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
                                       Formula Base) {
  // This method is only interesting on a plurality of registers.
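  // For example, if a formula's base registers are (%a, %b, %c) and %a and %b
  // both dominate the loop header and do not vary with the loop, a variant is
  // added which uses the single register (%a + %b) in their place (names
  // illustrative).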
  if (Base.BaseRegs.size() <= 1) return;

  Formula F = Base;
  F.BaseRegs.clear();
  SmallVector<const SCEV *, 4> Ops;
  for (SmallVectorImpl<const SCEV *>::const_iterator
       I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (BaseReg->properlyDominates(L->getHeader(), &DT) &&
        !BaseReg->hasComputableLoopEvolution(L))
      Ops.push_back(BaseReg);
    else
      F.BaseRegs.push_back(BaseReg);
  }
  if (Ops.size() > 1) {
    F.BaseRegs.push_back(SE.getAddExpr(Ops));
    (void)InsertFormula(LU, LUIdx, F);
  }
}

/// GenerateSymbolicOffsets - Generate reuse formulae using symbolic offsets.
void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // We can't add a symbolic offset if the address already contains one.
  if (Base.AM.BaseGV) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *G = Base.BaseRegs[i];
    GlobalValue *GV = ExtractSymbol(G, SE);
    if (G->isZero() || !GV)
      continue;
    Formula F = Base;
    F.AM.BaseGV = GV;
    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI))
      continue;
    F.BaseRegs[i] = G;
    (void)InsertFormula(LU, LUIdx, F);
  }
}

/// GenerateConstantOffsets - Generate reuse formulae using constant offsets.
void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // TODO: For now, just add the min and max offset, because it usually isn't
  // worthwhile looking at everything in between.
  SmallVector<int64_t, 4> Worklist;
  Worklist.push_back(LU.MinOffset);
  if (LU.MaxOffset != LU.MinOffset)
    Worklist.push_back(LU.MaxOffset);

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *G = Base.BaseRegs[i];

    for (SmallVectorImpl<int64_t>::const_iterator I = Worklist.begin(),
         E = Worklist.end(); I != E; ++I) {
      Formula F = Base;
      F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I;
      if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I,
                     LU.Kind, LU.AccessTy, TLI)) {
        F.BaseRegs[i] = SE.getAddExpr(G, SE.getIntegerSCEV(*I, G->getType()));

        (void)InsertFormula(LU, LUIdx, F);
      }
    }

    int64_t Imm = ExtractImmediate(G, SE);
    if (G->isZero() || Imm == 0)
      continue;
    Formula F = Base;
    F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Imm;
    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI))
      continue;
    F.BaseRegs[i] = G;
    (void)InsertFormula(LU, LUIdx, F);
  }
}

/// GenerateICmpZeroScales - For ICmpZero, check to see if we can scale up
/// the comparison. For example, x == y -> x*c == y*c.
void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
                                         Formula Base) {
  if (LU.Kind != LSRUse::ICmpZero) return;

  // Determine the integer type for the base formula.
  const Type *IntTy = Base.getType();
  if (!IntTy) return;
  if (SE.getTypeSizeInBits(IntTy) > 64) return;

  // Don't do this if there is more than one offset.
  if (LU.MinOffset != LU.MaxOffset) return;

  assert(!Base.AM.BaseGV && "ICmpZero use is not legal!");

  // Check each interesting stride.
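  // For each factor, the base offset, the use offset, every base register, and
  // the scaled register are all multiplied by it; if any of those products
  // fails to divide back out evenly, the factor is rejected as an overflow.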
2092 for (SmallSetVector<int64_t, 8>::const_iterator 2093 I = Factors.begin(), E = Factors.end(); I != E; ++I) { 2094 int64_t Factor = *I; 2095 Formula F = Base; 2096 2097 // Check that the multiplication doesn't overflow. 2098 F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs * Factor; 2099 if ((int64_t)F.AM.BaseOffs / Factor != Base.AM.BaseOffs) 2100 continue; 2101 2102 // Check that multiplying with the use offset doesn't overflow. 2103 int64_t Offset = LU.MinOffset; 2104 Offset = (uint64_t)Offset * Factor; 2105 if ((int64_t)Offset / Factor != LU.MinOffset) 2106 continue; 2107 2108 // Check that this scale is legal. 2109 if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI)) 2110 continue; 2111 2112 // Compensate for the use having MinOffset built into it. 2113 F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Offset - LU.MinOffset; 2114 2115 const SCEV *FactorS = SE.getIntegerSCEV(Factor, IntTy); 2116 2117 // Check that multiplying with each base register doesn't overflow. 2118 for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) { 2119 F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS); 2120 if (getSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i]) 2121 goto next; 2122 } 2123 2124 // Check that multiplying with the scaled register doesn't overflow. 2125 if (F.ScaledReg) { 2126 F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS); 2127 if (getSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg) 2128 continue; 2129 } 2130 2131 // If we make it here and it's legal, add it. 2132 (void)InsertFormula(LU, LUIdx, F); 2133 next:; 2134 } 2135 } 2136 2137 /// GenerateScales - Generate stride factor reuse formulae by making use of 2138 /// scaled-offset address modes, for example. 2139 void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, 2140 Formula Base) { 2141 // Determine the integer type for the base formula. 2142 const Type *IntTy = Base.getType(); 2143 if (!IntTy) return; 2144 2145 // If this Formula already has a scaled register, we can't add another one. 2146 if (Base.AM.Scale != 0) return; 2147 2148 // Check each interesting stride. 2149 for (SmallSetVector<int64_t, 8>::const_iterator 2150 I = Factors.begin(), E = Factors.end(); I != E; ++I) { 2151 int64_t Factor = *I; 2152 2153 Base.AM.Scale = Factor; 2154 Base.AM.HasBaseReg = Base.BaseRegs.size() > 1; 2155 // Check whether this scale is going to be legal. 2156 if (!isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset, 2157 LU.Kind, LU.AccessTy, TLI)) { 2158 // As a special-case, handle special out-of-loop Basic users specially. 2159 // TODO: Reconsider this special case. 2160 if (LU.Kind == LSRUse::Basic && 2161 isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset, 2162 LSRUse::Special, LU.AccessTy, TLI) && 2163 LU.AllFixupsOutsideLoop) 2164 LU.Kind = LSRUse::Special; 2165 else 2166 continue; 2167 } 2168 // For an ICmpZero, negating a solitary base register won't lead to 2169 // new solutions. 2170 if (LU.Kind == LSRUse::ICmpZero && 2171 !Base.AM.HasBaseReg && Base.AM.BaseOffs == 0 && !Base.AM.BaseGV) 2172 continue; 2173 // For each addrec base reg, apply the scale, if possible. 2174 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) 2175 if (const SCEVAddRecExpr *AR = 2176 dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i])) { 2177 const SCEV *FactorS = SE.getIntegerSCEV(Factor, IntTy); 2178 if (FactorS->isZero()) 2179 continue; 2180 // Divide out the factor, ignoring high bits, since we'll be 2181 // scaling the value back up in the end. 
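        // For example, with a factor of 4 the addrec {0,+,4}<%L> divides down
        // to {0,+,1}<%L>, which then serves as the ScaledReg of a formula
        // whose AM.Scale is 4 (%L is illustrative).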
2182 if (const SCEV *Quotient = getSDiv(AR, FactorS, SE, true)) { 2183 // TODO: This could be optimized to avoid all the copying. 2184 Formula F = Base; 2185 F.ScaledReg = Quotient; 2186 std::swap(F.BaseRegs[i], F.BaseRegs.back()); 2187 F.BaseRegs.pop_back(); 2188 (void)InsertFormula(LU, LUIdx, F); 2189 } 2190 } 2191 } 2192 } 2193 2194 /// GenerateTruncates - Generate reuse formulae from different IV types. 2195 void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, 2196 Formula Base) { 2197 // This requires TargetLowering to tell us which truncates are free. 2198 if (!TLI) return; 2199 2200 // Don't bother truncating symbolic values. 2201 if (Base.AM.BaseGV) return; 2202 2203 // Determine the integer type for the base formula. 2204 const Type *DstTy = Base.getType(); 2205 if (!DstTy) return; 2206 DstTy = SE.getEffectiveSCEVType(DstTy); 2207 2208 for (SmallSetVector<const Type *, 4>::const_iterator 2209 I = Types.begin(), E = Types.end(); I != E; ++I) { 2210 const Type *SrcTy = *I; 2211 if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) { 2212 Formula F = Base; 2213 2214 if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I); 2215 for (SmallVectorImpl<const SCEV *>::iterator J = F.BaseRegs.begin(), 2216 JE = F.BaseRegs.end(); J != JE; ++J) 2217 *J = SE.getAnyExtendExpr(*J, SrcTy); 2218 2219 // TODO: This assumes we've done basic processing on all uses and 2220 // have an idea what the register usage is. 2221 if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses)) 2222 continue; 2223 2224 (void)InsertFormula(LU, LUIdx, F); 2225 } 2226 } 2227 } 2228 2229 namespace { 2230 2231 /// WorkItem - Helper class for GenerateConstantOffsetReuse. It's used to 2232 /// defer modifications so that the search phase doesn't have to worry about 2233 /// the data structures moving underneath it. 2234 struct WorkItem { 2235 size_t LUIdx; 2236 int64_t Imm; 2237 const SCEV *OrigReg; 2238 2239 WorkItem(size_t LI, int64_t I, const SCEV *R) 2240 : LUIdx(LI), Imm(I), OrigReg(R) {} 2241 2242 void print(raw_ostream &OS) const; 2243 void dump() const; 2244 }; 2245 2246 } 2247 2248 void WorkItem::print(raw_ostream &OS) const { 2249 OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx 2250 << " , add offset " << Imm; 2251 } 2252 2253 void WorkItem::dump() const { 2254 print(errs()); errs() << '\n'; 2255 } 2256 2257 /// GenerateCrossUseConstantOffsets - Look for registers which are a constant 2258 /// distance apart and try to form reuse opportunities between them. 2259 void LSRInstance::GenerateCrossUseConstantOffsets() { 2260 // Group the registers by their value without any added constant offset. 2261 typedef std::map<int64_t, const SCEV *> ImmMapTy; 2262 typedef DenseMap<const SCEV *, ImmMapTy> RegMapTy; 2263 RegMapTy Map; 2264 DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap; 2265 SmallVector<const SCEV *, 8> Sequence; 2266 for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end(); 2267 I != E; ++I) { 2268 const SCEV *Reg = *I; 2269 int64_t Imm = ExtractImmediate(Reg, SE); 2270 std::pair<RegMapTy::iterator, bool> Pair = 2271 Map.insert(std::make_pair(Reg, ImmMapTy())); 2272 if (Pair.second) 2273 Sequence.push_back(Reg); 2274 Pair.first->second.insert(std::make_pair(Imm, *I)); 2275 UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(*I); 2276 } 2277 2278 // Now examine each set of registers with the same base value. 
  // Build up a list of work to do and do the work in a separate step so that
  // we're not adding formulae and register counts while we're searching.
  SmallVector<WorkItem, 32> WorkItems;
  SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = Sequence.begin(),
       E = Sequence.end(); I != E; ++I) {
    const SCEV *Reg = *I;
    const ImmMapTy &Imms = Map.find(Reg)->second;

    // It's not worthwhile looking for reuse if there's only one offset.
    if (Imms.size() == 1)
      continue;

    DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
          for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
               J != JE; ++J)
            dbgs() << ' ' << J->first;
          dbgs() << '\n');

    // Examine each offset.
    for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
         J != JE; ++J) {
      const SCEV *OrigReg = J->second;

      int64_t JImm = J->first;
      const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);

      if (!isa<SCEVConstant>(OrigReg) &&
          UsedByIndicesMap[Reg].count() == 1) {
        DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg << '\n');
        continue;
      }

      // Conservatively examine offsets between this orig reg and a few
      // selected other orig regs.
      ImmMapTy::const_iterator OtherImms[] = {
        Imms.begin(), prior(Imms.end()),
        Imms.upper_bound((Imms.begin()->first + prior(Imms.end())->first) / 2)
      };
      for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
        ImmMapTy::const_iterator M = OtherImms[i];
        if (M == J || M == JE) continue;

        // Compute the difference between the two.
        int64_t Imm = (uint64_t)JImm - M->first;
        for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1;
             LUIdx = UsedByIndices.find_next(LUIdx))
          // Make a memo of this use, offset, and register tuple.
          if (UniqueItems.insert(std::make_pair(LUIdx, Imm)))
            WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
      }
    }
  }

  Map.clear();
  Sequence.clear();
  UsedByIndicesMap.clear();
  UniqueItems.clear();

  // Now iterate through the worklist and add new formulae.
  for (SmallVectorImpl<WorkItem>::const_iterator I = WorkItems.begin(),
       E = WorkItems.end(); I != E; ++I) {
    const WorkItem &WI = *I;
    size_t LUIdx = WI.LUIdx;
    LSRUse &LU = Uses[LUIdx];
    int64_t Imm = WI.Imm;
    const SCEV *OrigReg = WI.OrigReg;

    const Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
    const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
    unsigned BitWidth = SE.getTypeSizeInBits(IntTy);

    // TODO: Use a more targeted data structure.
    for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
      Formula F = LU.Formulae[L];
      // Use the immediate in the scaled register.
      if (F.ScaledReg == OrigReg) {
        int64_t Offs = (uint64_t)F.AM.BaseOffs +
                       Imm * (uint64_t)F.AM.Scale;
        // Don't create 50 + reg(-50).
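        // That is, if the formula already references a register holding
        // exactly the negated offset, folding the immediate in would merely
        // cancel against it rather than simplify anything.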
2359 if (F.referencesReg(SE.getSCEV( 2360 ConstantInt::get(IntTy, -(uint64_t)Offs)))) 2361 continue; 2362 Formula NewF = F; 2363 NewF.AM.BaseOffs = Offs; 2364 if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset, 2365 LU.Kind, LU.AccessTy, TLI)) 2366 continue; 2367 NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg); 2368 2369 // If the new scale is a constant in a register, and adding the constant 2370 // value to the immediate would produce a value closer to zero than the 2371 // immediate itself, then the formula isn't worthwhile. 2372 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg)) 2373 if (C->getValue()->getValue().isNegative() != 2374 (NewF.AM.BaseOffs < 0) && 2375 (C->getValue()->getValue().abs() * APInt(BitWidth, F.AM.Scale)) 2376 .ule(APInt(BitWidth, NewF.AM.BaseOffs).abs())) 2377 continue; 2378 2379 // OK, looks good. 2380 (void)InsertFormula(LU, LUIdx, NewF); 2381 } else { 2382 // Use the immediate in a base register. 2383 for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) { 2384 const SCEV *BaseReg = F.BaseRegs[N]; 2385 if (BaseReg != OrigReg) 2386 continue; 2387 Formula NewF = F; 2388 NewF.AM.BaseOffs = (uint64_t)NewF.AM.BaseOffs + Imm; 2389 if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset, 2390 LU.Kind, LU.AccessTy, TLI)) 2391 continue; 2392 NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg); 2393 2394 // If the new formula has a constant in a register, and adding the 2395 // constant value to the immediate would produce a value closer to 2396 // zero than the immediate itself, then the formula isn't worthwhile. 2397 for (SmallVectorImpl<const SCEV *>::const_iterator 2398 J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end(); 2399 J != JE; ++J) 2400 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J)) 2401 if (C->getValue()->getValue().isNegative() != 2402 (NewF.AM.BaseOffs < 0) && 2403 C->getValue()->getValue().abs() 2404 .ule(APInt(BitWidth, NewF.AM.BaseOffs).abs())) 2405 goto skip_formula; 2406 2407 // Ok, looks good. 2408 (void)InsertFormula(LU, LUIdx, NewF); 2409 break; 2410 skip_formula:; 2411 } 2412 } 2413 } 2414 } 2415 } 2416 2417 /// GenerateAllReuseFormulae - Generate formulae for each use. 2418 void 2419 LSRInstance::GenerateAllReuseFormulae() { 2420 // This is split into two loops so that hasRegsUsedByUsesOtherThan 2421 // queries are more precise. 
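  // Reassociation and combination run to completion first so that the
  // register-use counts consulted by the later generators reflect any
  // registers those transformations introduce.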
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateScales(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
  }

  GenerateCrossUseConstantOffsets();
}

/// If there are multiple formulae with the same set of registers used
/// by other uses, pick the best one and delete the others.
void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
#ifndef NDEBUG
  bool Changed = false;
#endif

  // Collect the best formula for each unique set of shared registers. This
  // is reset for each use.
  typedef DenseMap<SmallVector<const SCEV *, 2>, size_t, UniquifierDenseMapInfo>
    BestFormulaeTy;
  BestFormulaeTy BestFormulae;

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    FormulaSorter Sorter(L, LU, SE, DT);

    // Clear out the set of used regs; it will be recomputed.
    LU.Regs.clear();

    for (size_t FIdx = 0, NumForms = LU.Formulae.size();
         FIdx != NumForms; ++FIdx) {
      Formula &F = LU.Formulae[FIdx];

      SmallVector<const SCEV *, 2> Key;
      for (SmallVectorImpl<const SCEV *>::const_iterator J = F.BaseRegs.begin(),
           JE = F.BaseRegs.end(); J != JE; ++J) {
        const SCEV *Reg = *J;
        if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
          Key.push_back(Reg);
      }
      if (F.ScaledReg &&
          RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
        Key.push_back(F.ScaledReg);
      // Unstable sort by host order ok, because this is only used for
      // uniquifying.
      std::sort(Key.begin(), Key.end());

      std::pair<BestFormulaeTy::const_iterator, bool> P =
        BestFormulae.insert(std::make_pair(Key, FIdx));
      if (!P.second) {
        Formula &Best = LU.Formulae[P.first->second];
        if (Sorter.operator()(F, Best))
          std::swap(F, Best);
        DEBUG(dbgs() << "Filtering out "; F.print(dbgs());
              dbgs() << "\n"
                        " in favor of "; Best.print(dbgs());
              dbgs() << '\n');
#ifndef NDEBUG
        Changed = true;
#endif
        std::swap(F, LU.Formulae.back());
        LU.Formulae.pop_back();
        --FIdx;
        --NumForms;
        continue;
      }
      if (F.ScaledReg) LU.Regs.insert(F.ScaledReg);
      LU.Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
    }
    BestFormulae.clear();
  }

  DEBUG(if (Changed) {
          dbgs() << "\n"
                    "After filtering out undesirable candidates:\n";
          print_uses(dbgs());
        });
}

/// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of
/// formulae to choose from, use some rough heuristics to prune down the number
/// of formulae. This keeps the main solver from taking an extraordinary amount
/// of time in some worst-case scenarios.
void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
  // This is a rough guess that seems to work fairly well.
  const size_t Limit = UINT16_MAX;

  SmallPtrSet<const SCEV *, 4> Taken;
  for (;;) {
    // Estimate the worst-case number of solutions we might consider. We almost
    // never consider this many solutions because we prune the search space,
    // but the pruning isn't always sufficient.
    uint32_t Power = 1;
    for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
         E = Uses.end(); I != E; ++I) {
      size_t FSize = I->Formulae.size();
      if (FSize >= Limit) {
        Power = Limit;
        break;
      }
      Power *= FSize;
      if (Power >= Limit)
        break;
    }
    if (Power < Limit)
      break;

    // Ok, we have too many formulae on our hands to conveniently handle.
    // Use a rough heuristic to thin out the list.

    // Pick the register which is used by the most LSRUses, which is likely
    // to be a good reuse register candidate.
    const SCEV *Best = 0;
    unsigned BestNum = 0;
    for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
         I != E; ++I) {
      const SCEV *Reg = *I;
      if (Taken.count(Reg))
        continue;
      if (!Best)
        Best = Reg;
      else {
        unsigned Count = RegUses.getUsedByIndices(Reg).count();
        if (Count > BestNum) {
          Best = Reg;
          BestNum = Count;
        }
      }
    }

    DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
                 << " will yield profitable reuse.\n");
    Taken.insert(Best);

    // In any use with formulae which reference this register, delete formulae
    // which don't reference it.
    for (SmallVectorImpl<LSRUse>::iterator I = Uses.begin(),
         E = Uses.end(); I != E; ++I) {
      LSRUse &LU = *I;
      if (!LU.Regs.count(Best)) continue;

      // Clear out the set of used regs; it will be recomputed.
2579 LU.Regs.clear(); 2580 2581 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { 2582 Formula &F = LU.Formulae[i]; 2583 if (!F.referencesReg(Best)) { 2584 DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); 2585 std::swap(LU.Formulae.back(), F); 2586 LU.Formulae.pop_back(); 2587 --e; 2588 --i; 2589 continue; 2590 } 2591 2592 if (F.ScaledReg) LU.Regs.insert(F.ScaledReg); 2593 LU.Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end()); 2594 } 2595 } 2596 2597 DEBUG(dbgs() << "After pre-selection:\n"; 2598 print_uses(dbgs())); 2599 } 2600 } 2601 2602 /// SolveRecurse - This is the recursive solver. 2603 void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution, 2604 Cost &SolutionCost, 2605 SmallVectorImpl<const Formula *> &Workspace, 2606 const Cost &CurCost, 2607 const SmallPtrSet<const SCEV *, 16> &CurRegs, 2608 DenseSet<const SCEV *> &VisitedRegs) const { 2609 // Some ideas: 2610 // - prune more: 2611 // - use more aggressive filtering 2612 // - sort the formula so that the most profitable solutions are found first 2613 // - sort the uses too 2614 // - search faster: 2615 // - dont compute a cost, and then compare. compare while computing a cost 2616 // and bail early. 2617 // - track register sets with SmallBitVector 2618 2619 const LSRUse &LU = Uses[Workspace.size()]; 2620 2621 // If this use references any register that's already a part of the 2622 // in-progress solution, consider it a requirement that a formula must 2623 // reference that register in order to be considered. This prunes out 2624 // unprofitable searching. 2625 SmallSetVector<const SCEV *, 4> ReqRegs; 2626 for (SmallPtrSet<const SCEV *, 16>::const_iterator I = CurRegs.begin(), 2627 E = CurRegs.end(); I != E; ++I) 2628 if (LU.Regs.count(*I)) 2629 ReqRegs.insert(*I); 2630 2631 bool AnySatisfiedReqRegs = false; 2632 SmallPtrSet<const SCEV *, 16> NewRegs; 2633 Cost NewCost; 2634 retry: 2635 for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(), 2636 E = LU.Formulae.end(); I != E; ++I) { 2637 const Formula &F = *I; 2638 2639 // Ignore formulae which do not use any of the required registers. 2640 for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(), 2641 JE = ReqRegs.end(); J != JE; ++J) { 2642 const SCEV *Reg = *J; 2643 if ((!F.ScaledReg || F.ScaledReg != Reg) && 2644 std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) == 2645 F.BaseRegs.end()) 2646 goto skip; 2647 } 2648 AnySatisfiedReqRegs = true; 2649 2650 // Evaluate the cost of the current formula. If it's already worse than 2651 // the current best, prune the search at that point. 2652 NewCost = CurCost; 2653 NewRegs = CurRegs; 2654 NewCost.RateFormula(F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT); 2655 if (NewCost < SolutionCost) { 2656 Workspace.push_back(&F); 2657 if (Workspace.size() != Uses.size()) { 2658 SolveRecurse(Solution, SolutionCost, Workspace, NewCost, 2659 NewRegs, VisitedRegs); 2660 if (F.getNumRegs() == 1 && Workspace.size() == 1) 2661 VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]); 2662 } else { 2663 DEBUG(dbgs() << "New best at "; NewCost.print(dbgs()); 2664 dbgs() << ". 
Regs:"; 2665 for (SmallPtrSet<const SCEV *, 16>::const_iterator 2666 I = NewRegs.begin(), E = NewRegs.end(); I != E; ++I) 2667 dbgs() << ' ' << **I; 2668 dbgs() << '\n'); 2669 2670 SolutionCost = NewCost; 2671 Solution = Workspace; 2672 } 2673 Workspace.pop_back(); 2674 } 2675 skip:; 2676 } 2677 2678 // If none of the formulae had all of the required registers, relax the 2679 // constraint so that we don't exclude all formulae. 2680 if (!AnySatisfiedReqRegs) { 2681 ReqRegs.clear(); 2682 goto retry; 2683 } 2684 } 2685 2686 void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const { 2687 SmallVector<const Formula *, 8> Workspace; 2688 Cost SolutionCost; 2689 SolutionCost.Loose(); 2690 Cost CurCost; 2691 SmallPtrSet<const SCEV *, 16> CurRegs; 2692 DenseSet<const SCEV *> VisitedRegs; 2693 Workspace.reserve(Uses.size()); 2694 2695 SolveRecurse(Solution, SolutionCost, Workspace, CurCost, 2696 CurRegs, VisitedRegs); 2697 2698 // Ok, we've now made all our decisions. 2699 DEBUG(dbgs() << "\n" 2700 "The chosen solution requires "; SolutionCost.print(dbgs()); 2701 dbgs() << ":\n"; 2702 for (size_t i = 0, e = Uses.size(); i != e; ++i) { 2703 dbgs() << " "; 2704 Uses[i].print(dbgs()); 2705 dbgs() << "\n" 2706 " "; 2707 Solution[i]->print(dbgs()); 2708 dbgs() << '\n'; 2709 }); 2710 } 2711 2712 /// getImmediateDominator - A handy utility for the specific DominatorTree 2713 /// query that we need here. 2714 /// 2715 static BasicBlock *getImmediateDominator(BasicBlock *BB, DominatorTree &DT) { 2716 DomTreeNode *Node = DT.getNode(BB); 2717 if (!Node) return 0; 2718 Node = Node->getIDom(); 2719 if (!Node) return 0; 2720 return Node->getBlock(); 2721 } 2722 2723 Value *LSRInstance::Expand(const LSRFixup &LF, 2724 const Formula &F, 2725 BasicBlock::iterator IP, 2726 Loop *L, Instruction *IVIncInsertPos, 2727 SCEVExpander &Rewriter, 2728 SmallVectorImpl<WeakVH> &DeadInsts, 2729 ScalarEvolution &SE, DominatorTree &DT) const { 2730 const LSRUse &LU = Uses[LF.LUIdx]; 2731 2732 // Then, collect some instructions which we will remain dominated by when 2733 // expanding the replacement. These must be dominated by any operands that 2734 // will be required in the expansion. 2735 SmallVector<Instruction *, 4> Inputs; 2736 if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace)) 2737 Inputs.push_back(I); 2738 if (LU.Kind == LSRUse::ICmpZero) 2739 if (Instruction *I = 2740 dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1))) 2741 Inputs.push_back(I); 2742 if (LF.PostIncLoop && !L->contains(LF.UserInst)) 2743 Inputs.push_back(L->getLoopLatch()->getTerminator()); 2744 2745 // Then, climb up the immediate dominator tree as far as we can go while 2746 // still being dominated by the input positions. 
2747 for (;;) { 2748 bool AllDominate = true; 2749 Instruction *BetterPos = 0; 2750 BasicBlock *IDom = getImmediateDominator(IP->getParent(), DT); 2751 if (!IDom) break; 2752 Instruction *Tentative = IDom->getTerminator(); 2753 for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(), 2754 E = Inputs.end(); I != E; ++I) { 2755 Instruction *Inst = *I; 2756 if (Inst == Tentative || !DT.dominates(Inst, Tentative)) { 2757 AllDominate = false; 2758 break; 2759 } 2760 if (IDom == Inst->getParent() && 2761 (!BetterPos || DT.dominates(BetterPos, Inst))) 2762 BetterPos = next(BasicBlock::iterator(Inst)); 2763 } 2764 if (!AllDominate) 2765 break; 2766 if (BetterPos) 2767 IP = BetterPos; 2768 else 2769 IP = Tentative; 2770 } 2771 while (isa<PHINode>(IP)) ++IP; 2772 2773 // Inform the Rewriter if we have a post-increment use, so that it can 2774 // perform an advantageous expansion. 2775 Rewriter.setPostInc(LF.PostIncLoop); 2776 2777 // This is the type that the user actually needs. 2778 const Type *OpTy = LF.OperandValToReplace->getType(); 2779 // This will be the type that we'll initially expand to. 2780 const Type *Ty = F.getType(); 2781 if (!Ty) 2782 // No type known; just expand directly to the ultimate type. 2783 Ty = OpTy; 2784 else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy)) 2785 // Expand directly to the ultimate type if it's the right size. 2786 Ty = OpTy; 2787 // This is the type to do integer arithmetic in. 2788 const Type *IntTy = SE.getEffectiveSCEVType(Ty); 2789 2790 // Build up a list of operands to add together to form the full base. 2791 SmallVector<const SCEV *, 8> Ops; 2792 2793 // Expand the BaseRegs portion. 2794 for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(), 2795 E = F.BaseRegs.end(); I != E; ++I) { 2796 const SCEV *Reg = *I; 2797 assert(!Reg->isZero() && "Zero allocated in a base register!"); 2798 2799 // If we're expanding for a post-inc user for the add-rec's loop, make the 2800 // post-inc adjustment. 2801 const SCEV *Start = Reg; 2802 while (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Start)) { 2803 if (AR->getLoop() == LF.PostIncLoop) { 2804 Reg = SE.getAddExpr(Reg, AR->getStepRecurrence(SE)); 2805 // If the user is inside the loop, insert the code after the increment 2806 // so that it is dominated by its operand. 2807 if (L->contains(LF.UserInst)) 2808 IP = IVIncInsertPos; 2809 break; 2810 } 2811 Start = AR->getStart(); 2812 } 2813 2814 Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP))); 2815 } 2816 2817 // Expand the ScaledReg portion. 2818 Value *ICmpScaledV = 0; 2819 if (F.AM.Scale != 0) { 2820 const SCEV *ScaledS = F.ScaledReg; 2821 2822 // If we're expanding for a post-inc user for the add-rec's loop, make the 2823 // post-inc adjustment. 2824 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ScaledS)) 2825 if (AR->getLoop() == LF.PostIncLoop) 2826 ScaledS = SE.getAddExpr(ScaledS, AR->getStepRecurrence(SE)); 2827 2828 if (LU.Kind == LSRUse::ICmpZero) { 2829 // An interesting way of "folding" with an icmp is to use a negated 2830 // scale, which we'll implement by inserting it into the other operand 2831 // of the icmp. 2832 assert(F.AM.Scale == -1 && 2833 "The only scale supported by ICmpZero uses is -1!"); 2834 ICmpScaledV = Rewriter.expandCodeFor(ScaledS, 0, IP); 2835 } else { 2836 // Otherwise just expand the scaled register and an explicit scale, 2837 // which is expected to be matched as part of the address. 
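      // For example, the sum assembled below might expand to something like
      // (%base + 4 * %stride.iv), which the target's addressing-mode matching
      // can then fold into a scaled-index operand (names illustrative).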
2838 ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP)); 2839 ScaledS = SE.getMulExpr(ScaledS, 2840 SE.getIntegerSCEV(F.AM.Scale, 2841 ScaledS->getType())); 2842 Ops.push_back(ScaledS); 2843 } 2844 } 2845 2846 // Expand the immediate portions. 2847 if (F.AM.BaseGV) 2848 Ops.push_back(SE.getSCEV(F.AM.BaseGV)); 2849 int64_t Offset = (uint64_t)F.AM.BaseOffs + LF.Offset; 2850 if (Offset != 0) { 2851 if (LU.Kind == LSRUse::ICmpZero) { 2852 // The other interesting way of "folding" with an ICmpZero is to use a 2853 // negated immediate. 2854 if (!ICmpScaledV) 2855 ICmpScaledV = ConstantInt::get(IntTy, -Offset); 2856 else { 2857 Ops.push_back(SE.getUnknown(ICmpScaledV)); 2858 ICmpScaledV = ConstantInt::get(IntTy, Offset); 2859 } 2860 } else { 2861 // Just add the immediate values. These again are expected to be matched 2862 // as part of the address. 2863 Ops.push_back(SE.getIntegerSCEV(Offset, IntTy)); 2864 } 2865 } 2866 2867 // Emit instructions summing all the operands. 2868 const SCEV *FullS = Ops.empty() ? 2869 SE.getIntegerSCEV(0, IntTy) : 2870 SE.getAddExpr(Ops); 2871 Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP); 2872 2873 // We're done expanding now, so reset the rewriter. 2874 Rewriter.setPostInc(0); 2875 2876 // An ICmpZero Formula represents an ICmp which we're handling as a 2877 // comparison against zero. Now that we've expanded an expression for that 2878 // form, update the ICmp's other operand. 2879 if (LU.Kind == LSRUse::ICmpZero) { 2880 ICmpInst *CI = cast<ICmpInst>(LF.UserInst); 2881 DeadInsts.push_back(CI->getOperand(1)); 2882 assert(!F.AM.BaseGV && "ICmp does not support folding a global value and " 2883 "a scale at the same time!"); 2884 if (F.AM.Scale == -1) { 2885 if (ICmpScaledV->getType() != OpTy) { 2886 Instruction *Cast = 2887 CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false, 2888 OpTy, false), 2889 ICmpScaledV, OpTy, "tmp", CI); 2890 ICmpScaledV = Cast; 2891 } 2892 CI->setOperand(1, ICmpScaledV); 2893 } else { 2894 assert(F.AM.Scale == 0 && 2895 "ICmp does not support folding a global value and " 2896 "a scale at the same time!"); 2897 Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy), 2898 -(uint64_t)Offset); 2899 if (C->getType() != OpTy) 2900 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 2901 OpTy, false), 2902 C, OpTy); 2903 2904 CI->setOperand(1, C); 2905 } 2906 } 2907 2908 return FullV; 2909 } 2910 2911 /// Rewrite - Emit instructions for the leading candidate expression for this 2912 /// LSRUse (this is called "expanding"), and update the UserInst to reference 2913 /// the newly expanded value. 2914 void LSRInstance::Rewrite(const LSRFixup &LF, 2915 const Formula &F, 2916 Loop *L, Instruction *IVIncInsertPos, 2917 SCEVExpander &Rewriter, 2918 SmallVectorImpl<WeakVH> &DeadInsts, 2919 ScalarEvolution &SE, DominatorTree &DT, 2920 Pass *P) const { 2921 const Type *OpTy = LF.OperandValToReplace->getType(); 2922 2923 // First, find an insertion point that dominates UserInst. For PHI nodes, 2924 // find the nearest block which dominates all the relevant uses. 2925 if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) { 2926 DenseMap<BasicBlock *, Value *> Inserted; 2927 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 2928 if (PN->getIncomingValue(i) == LF.OperandValToReplace) { 2929 BasicBlock *BB = PN->getIncomingBlock(i); 2930 2931 // If this is a critical edge, split the edge so that we do not insert 2932 // the code on all predecessor/successor paths. 
We do this unless this 2933 // is the canonical backedge for this loop, which complicates post-inc 2934 // users. 2935 if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 && 2936 !isa<IndirectBrInst>(BB->getTerminator()) && 2937 (PN->getParent() != L->getHeader() || !L->contains(BB))) { 2938 // Split the critical edge. 2939 BasicBlock *NewBB = SplitCriticalEdge(BB, PN->getParent(), P); 2940 2941 // If PN is outside of the loop and BB is in the loop, we want to 2942 // move the block to be immediately before the PHI block, not 2943 // immediately after BB. 2944 if (L->contains(BB) && !L->contains(PN)) 2945 NewBB->moveBefore(PN->getParent()); 2946 2947 // Splitting the edge can reduce the number of PHI entries we have. 2948 e = PN->getNumIncomingValues(); 2949 BB = NewBB; 2950 i = PN->getBasicBlockIndex(BB); 2951 } 2952 2953 std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair = 2954 Inserted.insert(std::make_pair(BB, static_cast<Value *>(0))); 2955 if (!Pair.second) 2956 PN->setIncomingValue(i, Pair.first->second); 2957 else { 2958 Value *FullV = Expand(LF, F, BB->getTerminator(), L, IVIncInsertPos, 2959 Rewriter, DeadInsts, SE, DT); 2960 2961 // If this is reuse-by-noop-cast, insert the noop cast. 2962 if (FullV->getType() != OpTy) 2963 FullV = 2964 CastInst::Create(CastInst::getCastOpcode(FullV, false, 2965 OpTy, false), 2966 FullV, LF.OperandValToReplace->getType(), 2967 "tmp", BB->getTerminator()); 2968 2969 PN->setIncomingValue(i, FullV); 2970 Pair.first->second = FullV; 2971 } 2972 } 2973 } else { 2974 Value *FullV = Expand(LF, F, LF.UserInst, L, IVIncInsertPos, 2975 Rewriter, DeadInsts, SE, DT); 2976 2977 // If this is reuse-by-noop-cast, insert the noop cast. 2978 if (FullV->getType() != OpTy) { 2979 Instruction *Cast = 2980 CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false), 2981 FullV, OpTy, "tmp", LF.UserInst); 2982 FullV = Cast; 2983 } 2984 2985 // Update the user. ICmpZero is handled specially here (for now) because 2986 // Expand may have updated one of the operands of the icmp already, and 2987 // its new value may happen to be equal to LF.OperandValToReplace, in 2988 // which case doing replaceUsesOfWith leads to replacing both operands 2989 // with the same value. TODO: Reorganize this. 2990 if (Uses[LF.LUIdx].Kind == LSRUse::ICmpZero) 2991 LF.UserInst->setOperand(0, FullV); 2992 else 2993 LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV); 2994 } 2995 2996 DeadInsts.push_back(LF.OperandValToReplace); 2997 } 2998 2999 void 3000 LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution, 3001 Pass *P) { 3002 // Keep track of instructions we may have made dead, so that 3003 // we can remove them after we are done working. 3004 SmallVector<WeakVH, 16> DeadInsts; 3005 3006 SCEVExpander Rewriter(SE); 3007 Rewriter.disableCanonicalMode(); 3008 Rewriter.setIVIncInsertPos(L, IVIncInsertPos); 3009 3010 // Expand the new value definitions and update the users. 3011 for (size_t i = 0, e = Fixups.size(); i != e; ++i) { 3012 size_t LUIdx = Fixups[i].LUIdx; 3013 3014 Rewrite(Fixups[i], *Solution[LUIdx], L, IVIncInsertPos, Rewriter, 3015 DeadInsts, SE, DT, P); 3016 3017 Changed = true; 3018 } 3019 3020 // Clean up after ourselves. This must be done before deleting any 3021 // instructions. 
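  // (The expander caches the values it has emitted; clearing it first keeps
  // those caches from referring to instructions that are about to be erased.)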
  Rewriter.clear();

  Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
}

LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
  : IU(P->getAnalysis<IVUsers>()),
    SE(P->getAnalysis<ScalarEvolution>()),
    DT(P->getAnalysis<DominatorTree>()),
    TLI(tli), L(l), Changed(false), IVIncInsertPos(0) {

  // If LoopSimplify form is not available, stay out of trouble.
  if (!L->isLoopSimplifyForm()) return;

  // If there's no interesting work to be done, bail early.
  if (IU.empty()) return;

  DEBUG(dbgs() << "\nLSR on loop ";
        WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false);
        dbgs() << ":\n");

  /// OptimizeShadowIV - If the IV is used in an int-to-float cast
  /// inside the loop then try to eliminate the cast operation.
  OptimizeShadowIV();

  // Change loop terminating condition to use the postinc iv when possible.
  Changed |= OptimizeLoopTermCond();

  CollectInterestingTypesAndFactors();
  CollectFixupsAndInitialFormulae();
  CollectLoopInvariantFixupsAndFormulae();

  DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
        print_uses(dbgs()));

  // Now use the reuse data to generate a bunch of interesting ways
  // to formulate the values needed for the uses.
  GenerateAllReuseFormulae();

  DEBUG(dbgs() << "\n"
                  "After generating reuse formulae:\n";
        print_uses(dbgs()));

  FilterOutUndesirableDedicatedRegisters();
  NarrowSearchSpaceUsingHeuristics();

  SmallVector<const Formula *, 8> Solution;
  Solve(Solution);
  assert(Solution.size() == Uses.size() && "Malformed solution!");

  // Release memory that is no longer needed.
  Factors.clear();
  Types.clear();
  RegUses.clear();

#ifndef NDEBUG
  // Formulae should be legal.
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J)
      assert(isLegalUse(J->AM, LU.MinOffset, LU.MaxOffset,
                        LU.Kind, LU.AccessTy, TLI) &&
             "Illegal formula generated!");
  }
#endif

  // Now that we've decided what we want, make it so.
3091 ImplementSolution(Solution, P); 3092 } 3093 3094 void LSRInstance::print_factors_and_types(raw_ostream &OS) const { 3095 if (Factors.empty() && Types.empty()) return; 3096 3097 OS << "LSR has identified the following interesting factors and types: "; 3098 bool First = true; 3099 3100 for (SmallSetVector<int64_t, 8>::const_iterator 3101 I = Factors.begin(), E = Factors.end(); I != E; ++I) { 3102 if (!First) OS << ", "; 3103 First = false; 3104 OS << '*' << *I; 3105 } 3106 3107 for (SmallSetVector<const Type *, 4>::const_iterator 3108 I = Types.begin(), E = Types.end(); I != E; ++I) { 3109 if (!First) OS << ", "; 3110 First = false; 3111 OS << '(' << **I << ')'; 3112 } 3113 OS << '\n'; 3114 } 3115 3116 void LSRInstance::print_fixups(raw_ostream &OS) const { 3117 OS << "LSR is examining the following fixup sites:\n"; 3118 for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(), 3119 E = Fixups.end(); I != E; ++I) { 3120 const LSRFixup &LF = *I; 3121 dbgs() << " "; 3122 LF.print(OS); 3123 OS << '\n'; 3124 } 3125 } 3126 3127 void LSRInstance::print_uses(raw_ostream &OS) const { 3128 OS << "LSR is examining the following uses:\n"; 3129 for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(), 3130 E = Uses.end(); I != E; ++I) { 3131 const LSRUse &LU = *I; 3132 dbgs() << " "; 3133 LU.print(OS); 3134 OS << '\n'; 3135 for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(), 3136 JE = LU.Formulae.end(); J != JE; ++J) { 3137 OS << " "; 3138 J->print(OS); 3139 OS << '\n'; 3140 } 3141 } 3142 } 3143 3144 void LSRInstance::print(raw_ostream &OS) const { 3145 print_factors_and_types(OS); 3146 print_fixups(OS); 3147 print_uses(OS); 3148 } 3149 3150 void LSRInstance::dump() const { 3151 print(errs()); errs() << '\n'; 3152 } 3153 3154 namespace { 3155 3156 class LoopStrengthReduce : public LoopPass { 3157 /// TLI - Keep a pointer of a TargetLowering to consult for determining 3158 /// transformation profitability. 3159 const TargetLowering *const TLI; 3160 3161 public: 3162 static char ID; // Pass ID, replacement for typeid 3163 explicit LoopStrengthReduce(const TargetLowering *tli = 0); 3164 3165 private: 3166 bool runOnLoop(Loop *L, LPPassManager &LPM); 3167 void getAnalysisUsage(AnalysisUsage &AU) const; 3168 }; 3169 3170 } 3171 3172 char LoopStrengthReduce::ID = 0; 3173 static RegisterPass<LoopStrengthReduce> 3174 X("loop-reduce", "Loop Strength Reduction"); 3175 3176 Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) { 3177 return new LoopStrengthReduce(TLI); 3178 } 3179 3180 LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli) 3181 : LoopPass(&ID), TLI(tli) {} 3182 3183 void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const { 3184 // We split critical edges, so we change the CFG. However, we do update 3185 // many analyses if they are around. 3186 AU.addPreservedID(LoopSimplifyID); 3187 AU.addPreserved<LoopInfo>(); 3188 AU.addPreserved("domfrontier"); 3189 3190 AU.addRequiredID(LoopSimplifyID); 3191 AU.addRequired<DominatorTree>(); 3192 AU.addPreserved<DominatorTree>(); 3193 AU.addRequired<ScalarEvolution>(); 3194 AU.addPreserved<ScalarEvolution>(); 3195 AU.addRequired<IVUsers>(); 3196 AU.addPreserved<IVUsers>(); 3197 } 3198 3199 bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) { 3200 bool Changed = false; 3201 3202 // Run the main LSR transformation. 
3203 Changed |= LSRInstance(TLI, L, this).getChanged(); 3204 3205 // At this point, it is worth checking to see if any recurrence PHIs are also 3206 // dead, so that we can remove them as well. 3207 Changed |= DeleteDeadPHIs(L->getHeader()); 3208 3209 return Changed; 3210 } 3211