//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops
// that have the loop induction variable as one or more of their components.
// It rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumReduced,    "Number of IV uses strength reduced");
STATISTIC(NumInserted,   "Number of PHIs inserted");
STATISTIC(NumVariable,   "Number of PHIs with variable strides");
STATISTIC(NumEliminated, "Number of strides eliminated");
STATISTIC(NumShadow,     "Number of Shadow IVs optimized");
STATISTIC(NumImmSunk,    "Number of common expr immediates sunk into uses");
STATISTIC(NumLoopCond,   "Number of loop terminating conds optimized");

static cl::opt<bool> EnableFullLSRMode("enable-full-lsr",
                                       cl::init(false),
                                       cl::Hidden);

namespace {

  struct BasedUser;

  /// IVExpr - This structure keeps track of one IV expression inserted during
  /// StrengthReduceStridedIVUsers. It contains the stride, the common base,
  /// as well as the PHI node and increment value created for rewrite.
  struct IVExpr {
    const SCEV *Stride;
    const SCEV *Base;
    PHINode    *PHI;

    IVExpr(const SCEV *const stride, const SCEV *const base, PHINode *phi)
      : Stride(stride), Base(base), PHI(phi) {}
  };
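  // For example (illustrative values only): after rewriting users of an IV
  // that advances as {B,+,4} over i32, an IVExpr would record Stride = 4,
  // Base = B, and the "lsr.iv" PHI node created to implement the recurrence.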
  /// IVsOfOneStride - This structure keeps track of all IV expressions
  /// inserted during StrengthReduceStridedIVUsers for a particular stride of
  /// the IV.
  struct IVsOfOneStride {
    std::vector<IVExpr> IVs;

    void addIV(const SCEV *const Stride, const SCEV *const Base, PHINode *PHI) {
      IVs.push_back(IVExpr(Stride, Base, PHI));
    }
  };

  class LoopStrengthReduce : public LoopPass {
    IVUsers *IU;
    LoopInfo *LI;
    DominatorTree *DT;
    ScalarEvolution *SE;
    bool Changed;

    /// IVsByStride - Keep track of all IVs that have been inserted for a
    /// particular stride.
    std::map<const SCEV *, IVsOfOneStride> IVsByStride;

    /// StrideNoReuse - Keep track of all the strides whose ivs cannot be
    /// reused (nor should they be rewritten to reuse other strides).
    SmallSet<const SCEV *, 4> StrideNoReuse;

    /// DeadInsts - Keep track of instructions we may have made dead, so that
    /// we can remove them after we are done working.
    SmallVector<WeakVH, 16> DeadInsts;

    /// TLI - Keep a pointer to a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;

  public:
    static char ID; // Pass ID, replacement for typeid
    explicit LoopStrengthReduce(const TargetLowering *tli = NULL) :
      LoopPass(&ID), TLI(tli) {}

    bool runOnLoop(Loop *L, LPPassManager &LPM);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      // We split critical edges, so we change the CFG. However, we do update
      // many analyses if they are around.
      AU.addPreservedID(LoopSimplifyID);
      AU.addPreserved<LoopInfo>();
      AU.addPreserved<DominanceFrontier>();
      AU.addPreserved<DominatorTree>();

      AU.addRequiredID(LoopSimplifyID);
      AU.addRequired<LoopInfo>();
      AU.addRequired<DominatorTree>();
      AU.addRequired<ScalarEvolution>();
      AU.addPreserved<ScalarEvolution>();
      AU.addRequired<IVUsers>();
      AU.addPreserved<IVUsers>();
    }

  private:
    ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond,
                                  IVStrideUse* &CondUse,
                                  const SCEV *const *&CondStride);

    void OptimizeIndvars(Loop *L);
    void OptimizeLoopCountIV(Loop *L);
    void OptimizeLoopTermCond(Loop *L);

    /// OptimizeShadowIV - If IV is used in an int-to-float cast
    /// inside the loop then try to eliminate the cast operation.
    void OptimizeShadowIV(Loop *L);
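    // For example (illustrative): in a loop whose i32 IV "i" is only
    // observed through "(double)i" in the body, a parallel double-typed
    // "shadow" IV stepping by 1.0 can replace the int-to-float cast
    // inside the loop.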
    /// OptimizeMax - Rewrite the loop's terminating condition
    /// if it uses a max computation.
    ICmpInst *OptimizeMax(Loop *L, ICmpInst *Cond,
                          IVStrideUse* &CondUse);

    bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
                           const SCEV *const *&CondStride);
    bool RequiresTypeConversion(const Type *Ty, const Type *NewTy);
    const SCEV *CheckForIVReuse(bool, bool, bool, const SCEV *const&,
                                IVExpr&, const Type*,
                                const std::vector<BasedUser>& UsersToProcess);
    bool ValidScale(bool, int64_t,
                    const std::vector<BasedUser>& UsersToProcess);
    bool ValidOffset(bool, int64_t, int64_t,
                     const std::vector<BasedUser>& UsersToProcess);
    const SCEV *CollectIVUsers(const SCEV *const &Stride,
                               IVUsersOfOneStride &Uses,
                               Loop *L,
                               bool &AllUsesAreAddresses,
                               bool &AllUsesAreOutsideLoop,
                               std::vector<BasedUser> &UsersToProcess);
    bool ShouldUseFullStrengthReductionMode(
                                const std::vector<BasedUser> &UsersToProcess,
                                const Loop *L,
                                bool AllUsesAreAddresses,
                                const SCEV *Stride);
    void PrepareToStrengthReduceFully(
                                std::vector<BasedUser> &UsersToProcess,
                                const SCEV *Stride,
                                const SCEV *CommonExprs,
                                const Loop *L,
                                SCEVExpander &PreheaderRewriter);
    void PrepareToStrengthReduceFromSmallerStride(
                                std::vector<BasedUser> &UsersToProcess,
                                Value *CommonBaseV,
                                const IVExpr &ReuseIV,
                                Instruction *PreInsertPt);
    void PrepareToStrengthReduceWithNewPhi(
                                std::vector<BasedUser> &UsersToProcess,
                                const SCEV *Stride,
                                const SCEV *CommonExprs,
                                Value *CommonBaseV,
                                Instruction *IVIncInsertPt,
                                const Loop *L,
                                SCEVExpander &PreheaderRewriter);
    void StrengthReduceStridedIVUsers(const SCEV *const &Stride,
                                      IVUsersOfOneStride &Uses,
                                      Loop *L);
    void DeleteTriviallyDeadInstructions();
  };
}

char LoopStrengthReduce::ID = 0;
static RegisterPass<LoopStrengthReduce>
X("loop-reduce", "Loop Strength Reduction");

Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}

/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
void LoopStrengthReduce::DeleteTriviallyDeadInstructions() {
  if (DeadInsts.empty()) return;

  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.back());
    DeadInsts.pop_back();

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) {
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }
    }

    I->eraseFromParent();
    Changed = true;
  }
}
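// For example: if "%t = add i32 %iv.old, 4" becomes trivially dead after its
// user is rewritten, the loop above deletes %t, drops its operands, and
// pushes any operand instruction left with no uses (such as the old IV
// chain) back onto the worklist so the whole chain is cleaned up.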
/// containsAddRecFromDifferentLoop - Determine whether expression S involves a
/// subexpression that is an AddRec from a loop other than L. An outer loop
/// of L is OK, but not an inner loop nor a disjoint loop.
static bool containsAddRecFromDifferentLoop(const SCEV *S, Loop *L) {
  // This is very common, put it first.
  if (isa<SCEVConstant>(S))
    return false;
  if (const SCEVCommutativeExpr *AE = dyn_cast<SCEVCommutativeExpr>(S)) {
    for (unsigned i = 0, e = AE->getNumOperands(); i != e; ++i)
      if (containsAddRecFromDifferentLoop(AE->getOperand(i), L))
        return true;
    return false;
  }
  if (const SCEVAddRecExpr *AE = dyn_cast<SCEVAddRecExpr>(S)) {
    if (const Loop *newLoop = AE->getLoop()) {
      if (newLoop == L)
        return false;
      // If newLoop is an outer loop of L, this is OK.
      if (!LoopInfo::isNotAlreadyContainedIn(L, newLoop))
        return false;
    }
    return true;
  }
  if (const SCEVUDivExpr *DE = dyn_cast<SCEVUDivExpr>(S))
    return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
           containsAddRecFromDifferentLoop(DE->getRHS(), L);
#if 0
  // SCEVSDivExpr has been backed out temporarily, but will be back; we'll
  // need this when it is.
  if (const SCEVSDivExpr *DE = dyn_cast<SCEVSDivExpr>(S))
    return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
           containsAddRecFromDifferentLoop(DE->getRHS(), L);
#endif
  if (const SCEVCastExpr *CE = dyn_cast<SCEVCastExpr>(S))
    return containsAddRecFromDifferentLoop(CE->getOperand(), L);
  return false;
}

/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::prefetch:
    case Intrinsic::x86_sse2_loadu_dq:
    case Intrinsic::x86_sse2_loadu_pd:
    case Intrinsic::x86_sse_loadu_ps:
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      if (II->getOperand(1) == OperandVal)
        isAddress = true;
      break;
    }
  }
  return isAddress;
}

/// getAccessType - Return the type of the memory being accessed.
static const Type *getAccessType(const Instruction *Inst) {
  const Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getOperand(1)->getType();
      break;
    }
  }
  return AccessTy;
}
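// For example: for "store i32 %v, i32* %p", isAddressUse(SI, %p) above is
// true and getAccessType(SI) is i32, while isAddressUse(SI, %v) is false
// since %v is the stored value rather than an address.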
namespace {
  /// BasedUser - For a particular base value, keep information about how we've
  /// partitioned the expression so far.
  struct BasedUser {
    /// SE - The current ScalarEvolution object.
    ScalarEvolution *SE;

    /// Base - The Base value for the PHI node that needs to be inserted for
    /// this use. As the use is processed, information gets moved from this
    /// field to the Imm field (below). BasedUser values are sorted by this
    /// field.
    const SCEV *Base;

    /// Inst - The instruction using the induction variable.
    Instruction *Inst;

    /// OperandValToReplace - The operand value of Inst to replace with the
    /// EmittedBase.
    Value *OperandValToReplace;

    /// Imm - The immediate value that should be added to the base immediately
    /// before Inst, because it will be folded into the imm field of the
    /// instruction. This is also sometimes used for loop-variant values that
    /// must be added inside the loop.
    const SCEV *Imm;

    /// Phi - The induction variable that performs the striding that
    /// should be used for this user.
    PHINode *Phi;

    // isUseOfPostIncrementedValue - True if this should use the
    // post-incremented version of this IV, not the preincremented version.
    // This can only be set in special cases, such as the terminating setcc
    // instruction for a loop and uses outside the loop that are dominated by
    // the loop.
    bool isUseOfPostIncrementedValue;

    BasedUser(IVStrideUse &IVSU, ScalarEvolution *se)
      : SE(se), Base(IVSU.getOffset()), Inst(IVSU.getUser()),
        OperandValToReplace(IVSU.getOperandValToReplace()),
        Imm(SE->getIntegerSCEV(0, Base->getType())),
        isUseOfPostIncrementedValue(IVSU.isUseOfPostIncrementedValue()) {}

    // Once we rewrite the code to insert the new IVs we want, update the
    // operands of Inst to use the new expression 'NewBase', with 'Imm' added
    // to it.
    void RewriteInstructionToUseNewBase(const SCEV *const &NewBase,
                                        Instruction *InsertPt,
                                        SCEVExpander &Rewriter, Loop *L,
                                        Pass *P, LoopInfo &LI,
                                        SmallVectorImpl<WeakVH> &DeadInsts);

    Value *InsertCodeForBaseAtPosition(const SCEV *const &NewBase,
                                       const Type *Ty,
                                       SCEVExpander &Rewriter,
                                       Instruction *IP, Loop *L,
                                       LoopInfo &LI);
    void dump() const;
  };
}

void BasedUser::dump() const {
  errs() << " Base=" << *Base;
  errs() << " Imm=" << *Imm;
  errs() << "   Inst: " << *Inst;
}

Value *BasedUser::InsertCodeForBaseAtPosition(const SCEV *const &NewBase,
                                              const Type *Ty,
                                              SCEVExpander &Rewriter,
                                              Instruction *IP, Loop *L,
                                              LoopInfo &LI) {
  // Figure out where we *really* want to insert this code. In particular, if
  // the user is inside of a loop that is nested inside of L, we really don't
  // want to insert this expression before the user; we'd rather pull it out
  // as many loops as possible.
  Instruction *BaseInsertPt = IP;

  // Figure out the most-nested loop that IP is in.
  Loop *InsertLoop = LI.getLoopFor(IP->getParent());

  // If InsertLoop is not L, and InsertLoop is nested inside of L, figure out
  // the preheader of the outer-most loop where NewBase is not loop invariant.
  if (L->contains(IP->getParent()))
    while (InsertLoop && NewBase->isLoopInvariant(InsertLoop)) {
      BaseInsertPt = InsertLoop->getLoopPreheader()->getTerminator();
      InsertLoop = InsertLoop->getParentLoop();
    }

  Value *Base = Rewriter.expandCodeFor(NewBase, 0, BaseInsertPt);

  const SCEV *NewValSCEV = SE->getUnknown(Base);

  // Always emit the immediate into the same block as the user.
  NewValSCEV = SE->getAddExpr(NewValSCEV, Imm);

  return Rewriter.expandCodeFor(NewValSCEV, Ty, IP);
}
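// Note (illustrative): if the user above sits in an inner loop nested in L
// and NewBase is invariant in that inner loop, the base computation is
// hoisted into the inner loop's preheader rather than emitted next to the
// user, keeping the inner loop body free of redundant arithmetic.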
// Once we rewrite the code to insert the new IVs we want, update the
// operands of Inst to use the new expression 'NewBase', with 'Imm' added
// to it. NewBasePt is the last instruction which contributes to the
// value of NewBase in the case that it's a different instruction from
// the PHI that NewBase is computed from, or null otherwise.
//
void BasedUser::RewriteInstructionToUseNewBase(const SCEV *const &NewBase,
                                               Instruction *NewBasePt,
                                               SCEVExpander &Rewriter, Loop *L,
                                               Pass *P, LoopInfo &LI,
                                               SmallVectorImpl<WeakVH> &DeadInsts) {
  if (!isa<PHINode>(Inst)) {
    // By default, insert code at the user instruction.
    BasicBlock::iterator InsertPt = Inst;

    // However, if the Operand is itself an instruction, the (potentially
    // complex) inserted code may be shared by many users. Because of this, we
    // want to emit code for the computation of the operand right before its
    // old computation. This is usually safe, because we obviously used to use
    // the computation when it was computed in its current block. However, in
    // some cases (e.g. use of a post-incremented induction variable) the
    // NewBase value will be pinned to live somewhere after the original
    // computation. In this case, we have to back off.
    //
    // If this is a use outside the loop (which means after, since it is based
    // on a loop indvar) we use the post-incremented value, so that we don't
    // artificially make the preinc value live out of the bottom of the loop.
    if (!isUseOfPostIncrementedValue && L->contains(Inst->getParent())) {
      if (NewBasePt && isa<PHINode>(OperandValToReplace)) {
        InsertPt = NewBasePt;
        ++InsertPt;
      } else if (Instruction *OpInst
                 = dyn_cast<Instruction>(OperandValToReplace)) {
        InsertPt = OpInst;
        while (isa<PHINode>(InsertPt)) ++InsertPt;
      }
    }
    Value *NewVal = InsertCodeForBaseAtPosition(NewBase,
                                                OperandValToReplace->getType(),
                                                Rewriter, InsertPt, L, LI);
    // Replace the use of the operand Value with the new Phi we just created.
    Inst->replaceUsesOfWith(OperandValToReplace, NewVal);

    DEBUG(errs() << "      Replacing with ");
    DEBUG(WriteAsOperand(errs(), NewVal, /*PrintType=*/false));
    DEBUG(errs() << ", which has value " << *NewBase << " plus IMM "
                 << *Imm << "\n");
    return;
  }

  // PHI nodes are more complex. We have to insert one copy of the NewBase+Imm
  // expression into each operand block that uses it. Note that PHI nodes can
  // have multiple entries for the same predecessor. We use a map to make sure
  // that a PHI node only has a single Value* for each predecessor (which also
  // prevents us from inserting duplicate code in some blocks).
  DenseMap<BasicBlock*, Value*> InsertedCode;
  PHINode *PN = cast<PHINode>(Inst);
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingValue(i) == OperandValToReplace) {
      // If the original expression is outside the loop, put the replacement
      // code in the same place as the original expression, which need not be
      // an immediate predecessor of this PHI. This way we need only one copy
      // of it even if it is referenced multiple times in the PHI. We don't do
      // this when the original expression is inside the loop because multiple
      // copies sometimes do useful sinking of code in that case(?).
      Instruction *OldLoc = dyn_cast<Instruction>(OperandValToReplace);
      BasicBlock *PHIPred = PN->getIncomingBlock(i);
      if (L->contains(OldLoc->getParent())) {
        // If this is a critical edge, split the edge so that we do not insert
        // the code on all predecessor/successor paths. We do this unless this
        // is the canonical backedge for this loop, as this can make some
        // inserted code be in an illegal position.
        if (e != 1 && PHIPred->getTerminator()->getNumSuccessors() > 1 &&
            (PN->getParent() != L->getHeader() || !L->contains(PHIPred))) {

          // First step, split the critical edge.
          BasicBlock *NewBB = SplitCriticalEdge(PHIPred, PN->getParent(),
                                                P, false);

          // Next step: move the basic block. In particular, if the PHI node
          // is outside of the loop, and PredTI is in the loop, we want to
          // move the block to be immediately before the PHI block, not
          // immediately after PredTI.
          if (L->contains(PHIPred) && !L->contains(PN->getParent()))
            NewBB->moveBefore(PN->getParent());

          // Splitting the edge can reduce the number of PHI entries we have.
          e = PN->getNumIncomingValues();
          PHIPred = NewBB;
          i = PN->getBasicBlockIndex(PHIPred);
        }
      }
      Value *&Code = InsertedCode[PHIPred];
      if (!Code) {
        // Insert the code into the end of the predecessor block.
        Instruction *InsertPt = (L->contains(OldLoc->getParent())) ?
                                PHIPred->getTerminator() :
                                OldLoc->getParent()->getTerminator();
        Code = InsertCodeForBaseAtPosition(NewBase, PN->getType(),
                                           Rewriter, InsertPt, L, LI);

        DEBUG(errs() << "      Changing PHI use to ");
        DEBUG(WriteAsOperand(errs(), Code, /*PrintType=*/false));
        DEBUG(errs() << ", which has value " << *NewBase << " plus IMM "
                     << *Imm << "\n");
      }

      // Replace the use of the operand Value with the new Phi we just created.
      PN->setIncomingValue(i, Code);
      Rewriter.clear();
    }
  }

  // PHI node might have become a constant value after SplitCriticalEdge.
  DeadInsts.push_back(Inst);
}


/// fitsInAddressMode - Return true if V can be subsumed within an addressing
/// mode, and does not need to be put in a register first.
static bool fitsInAddressMode(const SCEV *const &V, const Type *AccessTy,
                              const TargetLowering *TLI, bool HasBaseReg) {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(V)) {
    int64_t VC = SC->getValue()->getSExtValue();
    if (TLI) {
      TargetLowering::AddrMode AM;
      AM.BaseOffs = VC;
      AM.HasBaseReg = HasBaseReg;
      return TLI->isLegalAddressingMode(AM, AccessTy);
    } else {
      // Defaults to PPC. PPC allows a sign-extended 16-bit immediate field.
      return (VC > -(1 << 16) && VC < (1 << 16)-1);
    }
  }

  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
    if (GlobalValue *GV = dyn_cast<GlobalValue>(SU->getValue())) {
      if (TLI) {
        TargetLowering::AddrMode AM;
        AM.BaseGV = GV;
        AM.HasBaseReg = HasBaseReg;
        return TLI->isLegalAddressingMode(AM, AccessTy);
      } else {
        // Default: assume global addresses are not legal.
      }
    }

  return false;
}
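// For example (illustrative): with an x86 TargetLowering, a constant such as
// 40 fits as an addressing-mode displacement and so never needs its own
// register; without target info, the conservative PPC-style 16-bit check
// above is used instead.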
/// MoveLoopVariantsToImmediateField - Move any subexpressions from Val that
/// are loop varying to the Imm operand.
static void MoveLoopVariantsToImmediateField(const SCEV *&Val, const SCEV *&Imm,
                                             Loop *L, ScalarEvolution *SE) {
  if (Val->isLoopInvariant(L)) return;  // Nothing to do.

  if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    SmallVector<const SCEV *, 4> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i)
      if (!SAE->getOperand(i)->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, SAE->getOperand(i));
      } else {
        NewOps.push_back(SAE->getOperand(i));
      }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    const SCEV *Start = SARE->getStart();
    MoveLoopVariantsToImmediateField(Start, Imm, L, SE);

    SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
    Ops[0] = Start;
    Val = SE->getAddRecExpr(Ops, SARE->getLoop());
  } else {
    // Otherwise, all of Val is variant, move the whole thing over.
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
  }
}
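// For example: given Val = (%a + %b) where %b varies inside L, the function
// above leaves Val = %a and accumulates %b into Imm, so the loop-variant
// part is materialized next to the user rather than in the preheader.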
/// MoveImmediateValues - Look at Val, and pull out any additions of constants
/// that can fit into the immediate field of instructions in the target.
/// Accumulate these immediate values into the Imm value.
static void MoveImmediateValues(const TargetLowering *TLI,
                                const Type *AccessTy,
                                const SCEV *&Val, const SCEV *&Imm,
                                bool isAddress, Loop *L,
                                ScalarEvolution *SE) {
  if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    SmallVector<const SCEV *, 4> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i) {
      const SCEV *NewOp = SAE->getOperand(i);
      MoveImmediateValues(TLI, AccessTy, NewOp, Imm, isAddress, L, SE);

      if (!NewOp->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, NewOp);
      } else {
        NewOps.push_back(NewOp);
      }
    }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
    return;
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    const SCEV *Start = SARE->getStart();
    MoveImmediateValues(TLI, AccessTy, Start, Imm, isAddress, L, SE);

    if (Start != SARE->getStart()) {
      SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Start;
      Val = SE->getAddRecExpr(Ops, SARE->getLoop());
    }
    return;
  } else if (const SCEVMulExpr *SME = dyn_cast<SCEVMulExpr>(Val)) {
    // Transform "8 * (4 + v)" -> "32 + 8*v" if "32" fits in the immed field.
    if (isAddress &&
        fitsInAddressMode(SME->getOperand(0), AccessTy, TLI, false) &&
        SME->getNumOperands() == 2 && SME->isLoopInvariant(L)) {

      const SCEV *SubImm = SE->getIntegerSCEV(0, Val->getType());
      const SCEV *NewOp = SME->getOperand(1);
      MoveImmediateValues(TLI, AccessTy, NewOp, SubImm, isAddress, L, SE);

      // If we extracted something out of the subexpressions, see if we can
      // simplify this!
      if (NewOp != SME->getOperand(1)) {
        // Scale SubImm up by "8". If the result is a target constant, we are
        // good.
        SubImm = SE->getMulExpr(SubImm, SME->getOperand(0));
        if (fitsInAddressMode(SubImm, AccessTy, TLI, false)) {
          // Accumulate the immediate.
          Imm = SE->getAddExpr(Imm, SubImm);

          // Update what is left of 'Val'.
          Val = SE->getMulExpr(SME->getOperand(0), NewOp);
          return;
        }
      }
    }
  }

  // Loop-variant expressions must stay in the immediate field of the
  // expression.
  if ((isAddress && fitsInAddressMode(Val, AccessTy, TLI, false)) ||
      !Val->isLoopInvariant(L)) {
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
    return;
  }

  // Otherwise, no immediates to move.
}

static void MoveImmediateValues(const TargetLowering *TLI,
                                Instruction *User,
                                const SCEV *&Val, const SCEV *&Imm,
                                bool isAddress, Loop *L,
                                ScalarEvolution *SE) {
  const Type *AccessTy = getAccessType(User);
  MoveImmediateValues(TLI, AccessTy, Val, Imm, isAddress, L, SE);
}

/// SeparateSubExprs - Decompose Expr into all of the subexpressions that are
/// added together. This is used to reassociate common addition subexprs
/// together for maximal sharing when rewriting bases.
static void SeparateSubExprs(SmallVector<const SCEV *, 16> &SubExprs,
                             const SCEV *Expr,
                             ScalarEvolution *SE) {
  if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(Expr)) {
    for (unsigned j = 0, e = AE->getNumOperands(); j != e; ++j)
      SeparateSubExprs(SubExprs, AE->getOperand(j), SE);
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Expr)) {
    const SCEV *Zero = SE->getIntegerSCEV(0, Expr->getType());
    if (SARE->getOperand(0) == Zero) {
      SubExprs.push_back(Expr);
    } else {
      // Compute the addrec with zero as its base.
      SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Zero;   // Start with zero base.
      SubExprs.push_back(SE->getAddRecExpr(Ops, SARE->getLoop()));

      SeparateSubExprs(SubExprs, SARE->getOperand(0), SE);
    }
  } else if (!Expr->isZero()) {
    // Do not add zero.
    SubExprs.push_back(Expr);
  }
}

// This is logically local to the following function, but C++ says we have
// to make it file scope.
struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
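// For example: SeparateSubExprs (above) decomposes (%a + %b + {0,+,4}) into
// the subexpressions %a, %b, and {0,+,4}; an addrec with a non-zero start
// such as {%a + %b,+,4} is split into {0,+,4} plus the subexpressions of
// its start value.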
/// RemoveCommonExpressionsFromUseBases - Look through all of the Bases of all
/// the Uses, removing any common subexpressions, except that if all such
/// subexpressions can be folded into an addressing mode for all uses inside
/// the loop (this case is referred to as "free" in comments herein) we do
/// not remove anything. This looks for things like (a+b+c) and
/// (a+c+d) and computes the common (a+c) subexpression. The common expression
/// is *removed* from the Bases and returned.
static const SCEV *
RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,
                                    ScalarEvolution *SE, Loop *L,
                                    const TargetLowering *TLI) {
  unsigned NumUses = Uses.size();

  // Only one use? This is a very common case, so we handle it specially and
  // cheaply.
  const SCEV *Zero = SE->getIntegerSCEV(0, Uses[0].Base->getType());
  const SCEV *Result = Zero;
  const SCEV *FreeResult = Zero;
  if (NumUses == 1) {
    // If the use is inside the loop, use its base, regardless of what it is:
    // it is clearly shared across all the IV's. If the use is outside the
    // loop (which means after it) we don't want to factor anything *into* the
    // loop, so just use 0 as the base.
    if (L->contains(Uses[0].Inst->getParent()))
      std::swap(Result, Uses[0].Base);
    return Result;
  }

  // To find common subexpressions, count how many of Uses use each expression.
  // If any subexpressions are used Uses.size() times, they are common.
  // Also track whether all uses of each expression can be moved into an
  // addressing mode "for free"; such expressions are left within the loop.
  // struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
  std::map<const SCEV *, SubExprUseData> SubExpressionUseData;

  // UniqueSubExprs - Keep track of all of the subexpressions we see in the
  // order we see them.
  SmallVector<const SCEV *, 16> UniqueSubExprs;

  SmallVector<const SCEV *, 16> SubExprs;
  unsigned NumUsesInsideLoop = 0;
  for (unsigned i = 0; i != NumUses; ++i) {
    // If the user is outside the loop, just ignore it for base computation.
    // Since the user is outside the loop, it must be *after* the loop (if it
    // were before, it could not be based on the loop IV). We don't want users
    // after the loop to affect base computation of values *inside* the loop,
    // because we can always add their offsets to the result IV after the loop
    // is done, ensuring we get good code inside the loop.
    if (!L->contains(Uses[i].Inst->getParent()))
      continue;
    NumUsesInsideLoop++;

    // If the base is zero (which is common), return zero now; there are no
    // CSEs we can find.
    if (Uses[i].Base == Zero) return Zero;

    // If this use is an address, we may be able to put CSEs in the addressing
    // mode rather than hoisting them.
    bool isAddrUse = isAddressUse(Uses[i].Inst, Uses[i].OperandValToReplace);
    // We may need the AccessTy below, but only when isAddrUse, so compute it
    // only in that case.
    const Type *AccessTy = 0;
    if (isAddrUse)
      AccessTy = getAccessType(Uses[i].Inst);

    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);
    // Add one to SubExpressionUseData.Count for each subexpr present, and
    // if the subexpr is not a valid immediate within an addressing mode use,
    // set SubExpressionUseData.notAllUsesAreFree. We definitely want to
    // hoist these out of the loop (if they are common to all uses).
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
      if (++SubExpressionUseData[SubExprs[j]].Count == 1)
        UniqueSubExprs.push_back(SubExprs[j]);
      if (!isAddrUse || !fitsInAddressMode(SubExprs[j], AccessTy, TLI, false))
        SubExpressionUseData[SubExprs[j]].notAllUsesAreFree = true;
    }
    SubExprs.clear();
  }

  // Now that we know how many times each is used, build Result. Iterate over
  // UniqueSubexprs so that we have a stable ordering.
  for (unsigned i = 0, e = UniqueSubExprs.size(); i != e; ++i) {
    std::map<const SCEV *, SubExprUseData>::iterator I =
      SubExpressionUseData.find(UniqueSubExprs[i]);
    assert(I != SubExpressionUseData.end() && "Entry not found?");
    if (I->second.Count == NumUsesInsideLoop) { // Found CSE!
      if (I->second.notAllUsesAreFree)
        Result = SE->getAddExpr(Result, I->first);
      else
        FreeResult = SE->getAddExpr(FreeResult, I->first);
    } else
      // Remove non-cse's from SubExpressionUseData.
      SubExpressionUseData.erase(I);
  }

  if (FreeResult != Zero) {
    // We have some subexpressions that can be subsumed into addressing
    // modes in every use inside the loop. However, it's possible that
    // there are so many of them that the combined FreeResult cannot
    // be subsumed, or that the target cannot handle both a FreeResult
    // and a Result in the same instruction (for example because it would
    // require too many registers). Check this.
    for (unsigned i = 0; i != NumUses; ++i) {
      if (!L->contains(Uses[i].Inst->getParent()))
        continue;
      // We know this is an addressing mode use; if there are any uses that
      // are not, FreeResult would be Zero.
      const Type *AccessTy = getAccessType(Uses[i].Inst);
      if (!fitsInAddressMode(FreeResult, AccessTy, TLI, Result != Zero)) {
        // FIXME: could split up FreeResult into pieces here, some hoisted
        // and some not. There is no obvious advantage to this.
        Result = SE->getAddExpr(Result, FreeResult);
        FreeResult = Zero;
        break;
      }
    }
  }

  // If we found no CSE's, return now.
  if (Result == Zero) return Result;

  // If we still have a FreeResult, remove its subexpressions from
  // SubExpressionUseData. This means they will remain in the use Bases.
  if (FreeResult != Zero) {
    SeparateSubExprs(SubExprs, FreeResult, SE);
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
      std::map<const SCEV *, SubExprUseData>::iterator I =
        SubExpressionUseData.find(SubExprs[j]);
      SubExpressionUseData.erase(I);
    }
    SubExprs.clear();
  }

  // Otherwise, remove all of the CSE's we found from each of the base values.
  for (unsigned i = 0; i != NumUses; ++i) {
    // Uses outside the loop don't necessarily include the common base, but
    // the final IV value coming into those uses does. Instead of trying to
    // remove the pieces of the common base, which might not be there,
    // subtract off the base to compensate for this.
    if (!L->contains(Uses[i].Inst->getParent())) {
      Uses[i].Base = SE->getMinusSCEV(Uses[i].Base, Result);
      continue;
    }

    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);

    // Remove any common subexpressions.
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j)
      if (SubExpressionUseData.count(SubExprs[j])) {
        SubExprs.erase(SubExprs.begin()+j);
        --j; --e;
      }

    // Finally, add the non-shared expressions together.
    if (SubExprs.empty())
      Uses[i].Base = Zero;
    else
      Uses[i].Base = SE->getAddExpr(SubExprs);
    SubExprs.clear();
  }

  return Result;
}
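// For example: with two in-loop bases (a+b+c) and (a+c+d), the function
// above returns the common (a+c), which is later emitted once in the
// preheader, and the bases are reduced to b and d. If every use could
// instead fold (a+c) into its addressing mode for free, it is left in the
// bases.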
/// ValidScale - Check whether the given Scale is valid for all loads and
/// stores in UsersToProcess.
///
bool LoopStrengthReduce::ValidScale(bool HasBaseReg, int64_t Scale,
                               const std::vector<BasedUser>& UsersToProcess) {
  if (!TLI)
    return true;

  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If this is a load or other access, pass the type of the access in.
    const Type *AccessTy =
      Type::getVoidTy(UsersToProcess[i].Inst->getContext());
    if (isAddressUse(UsersToProcess[i].Inst,
                     UsersToProcess[i].OperandValToReplace))
      AccessTy = getAccessType(UsersToProcess[i].Inst);
    else if (isa<PHINode>(UsersToProcess[i].Inst))
      continue;

    TargetLowering::AddrMode AM;
    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
      AM.BaseOffs = SC->getValue()->getSExtValue();
    AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
    AM.Scale = Scale;

    // If load[imm+r*scale] is illegal, bail out.
    if (!TLI->isLegalAddressingMode(AM, AccessTy))
      return false;
  }
  return true;
}

/// ValidOffset - Check whether the given Offset is valid for all loads and
/// stores in UsersToProcess.
///
bool LoopStrengthReduce::ValidOffset(bool HasBaseReg,
                               int64_t Offset,
                               int64_t Scale,
                               const std::vector<BasedUser>& UsersToProcess) {
  if (!TLI)
    return true;

  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If this is a load or other access, pass the type of the access in.
    const Type *AccessTy =
      Type::getVoidTy(UsersToProcess[i].Inst->getContext());
    if (isAddressUse(UsersToProcess[i].Inst,
                     UsersToProcess[i].OperandValToReplace))
      AccessTy = getAccessType(UsersToProcess[i].Inst);
    else if (isa<PHINode>(UsersToProcess[i].Inst))
      continue;

    TargetLowering::AddrMode AM;
    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
      AM.BaseOffs = SC->getValue()->getSExtValue();
    AM.BaseOffs = (uint64_t)AM.BaseOffs + (uint64_t)Offset;
    AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
    AM.Scale = Scale;

    // If load[imm+r*scale] is illegal, bail out.
    if (!TLI->isLegalAddressingMode(AM, AccessTy))
      return false;
  }
  return true;
}

/// RequiresTypeConversion - Returns true if converting Ty1 to Ty2 is not
/// a nop.
bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1,
                                                const Type *Ty2) {
  if (Ty1 == Ty2)
    return false;
  Ty1 = SE->getEffectiveSCEVType(Ty1);
  Ty2 = SE->getEffectiveSCEVType(Ty2);
  if (Ty1 == Ty2)
    return false;
  if (Ty1->canLosslesslyBitCastTo(Ty2))
    return false;
  if (TLI && TLI->isTruncateFree(Ty1, Ty2))
    return false;
  return true;
}
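// For example (illustrative): on x86, a scale of 4 in [base + index*4] is
// legal for ordinary integer accesses, so ValidScale(..., 4, ...) above
// typically succeeds, while a scale of 3 has no encoding and is rejected by
// isLegalAddressingMode.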
/// CheckForIVReuse - Returns the multiple if the stride is the multiple
/// of a previous stride and it is a legal value for the target addressing
/// mode scale component and optional base reg. This allows the users of
/// this stride to be rewritten as prev iv * factor. It returns 0 if no
/// reuse is possible. Factors can be negative on some targets, e.g. ARM.
///
/// If all uses are outside the loop, we don't require that all multiplies
/// be folded into the addressing mode, nor even that the factor be constant;
/// a multiply (executed once) outside the loop is better than another IV
/// within. Well, usually.
const SCEV *LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
                                bool AllUsesAreAddresses,
                                bool AllUsesAreOutsideLoop,
                                const SCEV *const &Stride,
                                IVExpr &IV, const Type *Ty,
                                const std::vector<BasedUser>& UsersToProcess) {
  if (StrideNoReuse.count(Stride))
    return SE->getIntegerSCEV(0, Stride->getType());

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) {
    int64_t SInt = SC->getValue()->getSExtValue();
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<const SCEV *, IVsOfOneStride>::iterator SI =
        IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first) ||
          StrideNoReuse.count(SI->first))
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SI->first != Stride &&
          (unsigned(abs64(SInt)) < SSInt || (SInt % SSInt) != 0))
        continue;
      int64_t Scale = SInt / SSInt;
      // Check that this stride is valid for all the types used for loads and
      // stores; if it can be used for some and not others, we might as well
      // use the original stride everywhere, since we have to create the IV
      // for it anyway. If the scale is 1, then we don't need to worry about
      // folding multiplications.
      if (Scale == 1 ||
          (AllUsesAreAddresses &&
           ValidScale(HasBaseReg, Scale, UsersToProcess))) {
        // Prefer to reuse an IV with a base of zero.
        for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
               IE = SI->second.IVs.end(); II != IE; ++II)
          // Only reuse previous IV if it would not require a type conversion
          // and if the base difference can be folded.
          if (II->Base->isZero() &&
              !RequiresTypeConversion(II->Base->getType(), Ty)) {
            IV = *II;
            return SE->getIntegerSCEV(Scale, Stride->getType());
          }
        // Otherwise, settle for an IV with a foldable base.
        if (AllUsesAreAddresses)
          for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
                 IE = SI->second.IVs.end(); II != IE; ++II)
            // Only reuse previous IV if it would not require a type conversion
            // and if the base difference can be folded.
            if (SE->getEffectiveSCEVType(II->Base->getType()) ==
                SE->getEffectiveSCEVType(Ty) &&
                isa<SCEVConstant>(II->Base)) {
              int64_t Base =
                cast<SCEVConstant>(II->Base)->getValue()->getSExtValue();
              if (Base > INT32_MIN && Base <= INT32_MAX &&
                  ValidOffset(HasBaseReg, -Base * Scale,
                              Scale, UsersToProcess)) {
                IV = *II;
                return SE->getIntegerSCEV(Scale, Stride->getType());
              }
            }
      }
    }
  } else if (AllUsesAreOutsideLoop) {
    // Accept nonconstant strides here; it is almost always right to
    // substitute an existing IV if we can.
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<const SCEV *, IVsOfOneStride>::iterator SI =
        IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SI->first != Stride && SSInt != 1)
        continue;
      for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
             IE = SI->second.IVs.end(); II != IE; ++II)
        // Accept nonzero base here.
        // Only reuse previous IV if it would not require a type conversion.
        if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
          IV = *II;
          return Stride;
        }
    }
    // Special case: the old IV is -1*x and this one is x. We can treat this
    // one as -1*old.
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<const SCEV *, IVsOfOneStride>::iterator SI =
        IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end())
        continue;
      if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(SI->first))
        if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(ME->getOperand(0)))
          if (Stride == ME->getOperand(1) &&
              SC->getValue()->getSExtValue() == -1LL)
            for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
                   IE = SI->second.IVs.end(); II != IE; ++II)
              // Accept nonzero base here.
              // Only reuse previous IV if it would not require type
              // conversion.
              if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
                IV = *II;
                return SE->getIntegerSCEV(-1LL, Stride->getType());
              }
    }
  }
  return SE->getIntegerSCEV(0, Stride->getType());
}

/// PartitionByIsUseOfPostIncrementedValue - Simple boolean predicate that
/// returns true if Val's isUseOfPostIncrementedValue is true.
static bool PartitionByIsUseOfPostIncrementedValue(const BasedUser &Val) {
  return Val.isUseOfPostIncrementedValue;
}

/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
static bool isNonConstantNegative(const SCEV *const &Expr) {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Expr);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}
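// For example: (-42 * %n) is a non-constant negative expression (a negative
// constant factor times a value), whereas the plain constant -42 is not,
// since it is not a multiply.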
/// CollectIVUsers - Transform our list of users and offsets to a bit more
/// complex table. In this new vector, each 'BasedUser' contains 'Base', the
/// base of the strided accesses, as well as the old information from Uses.
/// We progressively move information from the Base field to the Imm field,
/// until we eventually have the full access expression to rewrite the use.
const SCEV *LoopStrengthReduce::CollectIVUsers(const SCEV *const &Stride,
                                          IVUsersOfOneStride &Uses,
                                          Loop *L,
                                          bool &AllUsesAreAddresses,
                                          bool &AllUsesAreOutsideLoop,
                                          std::vector<BasedUser> &UsersToProcess) {
  // FIXME: Generalize to non-affine IV's.
  if (!Stride->isLoopInvariant(L))
    return SE->getIntegerSCEV(0, Stride->getType());

  UsersToProcess.reserve(Uses.Users.size());
  for (ilist<IVStrideUse>::iterator I = Uses.Users.begin(),
         E = Uses.Users.end(); I != E; ++I) {
    UsersToProcess.push_back(BasedUser(*I, SE));

    // Move any loop variant operands from the offset field to the immediate
    // field of the use, so that we don't try to use something before it is
    // computed.
    MoveLoopVariantsToImmediateField(UsersToProcess.back().Base,
                                     UsersToProcess.back().Imm, L, SE);
    assert(UsersToProcess.back().Base->isLoopInvariant(L) &&
           "Base value is not loop invariant!");
  }

  // We now have a whole bunch of uses of like-strided induction variables,
  // but they might all have different bases. We want to emit one PHI node
  // for this stride, into which we fold as many common expressions (between
  // the IVs) as possible. Start by identifying the common expressions in the
  // base values for the strides (e.g. if we have "A+C+B" and "A+B+D" as our
  // bases, find "A+B"), emit it to the preheader, then remove the expression
  // from the UsersToProcess base values.
  const SCEV *CommonExprs =
    RemoveCommonExpressionsFromUseBases(UsersToProcess, SE, L, TLI);

  // Next, figure out what we can represent in the immediate fields of
  // instructions. If we can represent anything there, move it to the imm
  // fields of the BasedUsers. We do this so that it increases the commonality
  // of the remaining uses.
  unsigned NumPHI = 0;
  bool HasAddress = false;
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If the user is not in the current loop, this means it is using the exit
    // value of the IV. Do not put anything in the base, make sure it's all in
    // the immediate field to allow as much factoring as possible.
    if (!L->contains(UsersToProcess[i].Inst->getParent())) {
      UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm,
                                             UsersToProcess[i].Base);
      UsersToProcess[i].Base =
        SE->getIntegerSCEV(0, UsersToProcess[i].Base->getType());
    } else {
      // Not all uses are outside the loop.
      AllUsesAreOutsideLoop = false;

      // Addressing modes can be folded into loads and stores. Be careful that
      // the store is through the expression, not of the expression though.
      bool isPHI = false;
      bool isAddress = isAddressUse(UsersToProcess[i].Inst,
                                    UsersToProcess[i].OperandValToReplace);
      if (isa<PHINode>(UsersToProcess[i].Inst)) {
        isPHI = true;
        ++NumPHI;
      }

      if (isAddress)
        HasAddress = true;

      // If this use isn't an address, then not all uses are addresses.
      if (!isAddress && !isPHI)
        AllUsesAreAddresses = false;

      MoveImmediateValues(TLI, UsersToProcess[i].Inst, UsersToProcess[i].Base,
                          UsersToProcess[i].Imm, isAddress, L, SE);
    }
  }

  // If one of the uses is a PHI node and all other uses are addresses, still
  // allow iv reuse. Essentially we are trading one constant multiplication
  // for one fewer iv.
  if (NumPHI > 1)
    AllUsesAreAddresses = false;

  // There are no in-loop address uses.
  if (AllUsesAreAddresses && (!HasAddress && !AllUsesAreOutsideLoop))
    AllUsesAreAddresses = false;

  return CommonExprs;
}
/// ShouldUseFullStrengthReductionMode - Test whether full strength-reduction
/// is valid and profitable for the given set of users of a stride. In
/// full strength-reduction mode, all addresses at the current stride are
/// strength-reduced all the way down to pointer arithmetic.
///
bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode(
                                   const std::vector<BasedUser> &UsersToProcess,
                                   const Loop *L,
                                   bool AllUsesAreAddresses,
                                   const SCEV *Stride) {
  if (!EnableFullLSRMode)
    return false;

  // The heuristics below aim to avoid increasing register pressure, but
  // fully strength-reducing all the addresses increases the number of
  // add instructions, so don't do this when optimizing for size.
  // TODO: If the loop is large, the savings due to simpler addresses
  // may outweigh the costs of the extra increment instructions.
  if (L->getHeader()->getParent()->hasFnAttr(Attribute::OptimizeForSize))
    return false;

  // TODO: For now, don't do full strength reduction if there could
  // potentially be greater-stride multiples of the current stride
  // which could reuse the current stride IV.
  if (IU->StrideOrder.back() != Stride)
    return false;

  // Iterate through the uses to find conditions that automatically rule out
  // full-lsr mode.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
    const SCEV *Base = UsersToProcess[i].Base;
    const SCEV *Imm = UsersToProcess[i].Imm;
    // If any users have a loop-variant component, they can't be fully
    // strength-reduced.
    if (Imm && !Imm->isLoopInvariant(L))
      return false;
    // If there are two users with the same base and the difference between
    // the two Imm values can't be folded into the address, full
    // strength reduction would increase register pressure.
    do {
      const SCEV *CurImm = UsersToProcess[i].Imm;
      if ((CurImm || Imm) && CurImm != Imm) {
        if (!CurImm) CurImm = SE->getIntegerSCEV(0, Stride->getType());
        if (!Imm) Imm = SE->getIntegerSCEV(0, Stride->getType());
        const Instruction *Inst = UsersToProcess[i].Inst;
        const Type *AccessTy = getAccessType(Inst);
        const SCEV *Diff = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
        if (!Diff->isZero() &&
            (!AllUsesAreAddresses ||
             !fitsInAddressMode(Diff, AccessTy, TLI, /*HasBaseReg=*/true)))
          return false;
      }
    } while (++i != e && Base == UsersToProcess[i].Base);
  }

  // If there's exactly one user in this stride, fully strength-reducing it
  // won't increase register pressure. If it's starting from a non-zero base,
  // it'll be simpler this way.
  if (UsersToProcess.size() == 1 && !UsersToProcess[0].Base->isZero())
    return true;

  // Otherwise, if there are any users in this stride that don't require
  // a register for their base, full strength-reduction will increase
  // register pressure.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
    if (UsersToProcess[i].Base->isZero())
      return false;

  // Otherwise, go for it.
  return true;
}
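// For example (illustrative): under -enable-full-lsr, three address users
// with starts B, B+256, and B+512 and stride S can each get their own PHI
// beginning at their start value and stepping by S, so no shared base
// register needs to live across the loop body.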
/// InsertAffinePhi - Create and insert a PHI node for an induction variable
/// with the specified start and step values in the specified loop.
///
/// If the step is a negated (non-constant) expression, the increment is
/// emitted as a subtract instead of an add.
///
/// Return the created phi node.
///
static PHINode *InsertAffinePhi(const SCEV *Start, const SCEV *Step,
                                Instruction *IVIncInsertPt,
                                const Loop *L,
                                SCEVExpander &Rewriter) {
  assert(Start->isLoopInvariant(L) && "New PHI start is not loop invariant!");
  assert(Step->isLoopInvariant(L) && "New PHI stride is not loop invariant!");

  BasicBlock *Header = L->getHeader();
  BasicBlock *Preheader = L->getLoopPreheader();
  BasicBlock *LatchBlock = L->getLoopLatch();
  const Type *Ty = Start->getType();
  Ty = Rewriter.SE.getEffectiveSCEVType(Ty);

  PHINode *PN = PHINode::Create(Ty, "lsr.iv", Header->begin());
  PN->addIncoming(Rewriter.expandCodeFor(Start, Ty, Preheader->getTerminator()),
                  Preheader);

  // If the stride is negative, insert a sub instead of an add for the
  // increment.
  bool isNegative = isNonConstantNegative(Step);
  const SCEV *IncAmount = Step;
  if (isNegative)
    IncAmount = Rewriter.SE.getNegativeSCEV(Step);

  // Insert an add instruction right before the terminator corresponding
  // to the back-edge or just before the only use. The location is determined
  // by the caller and passed in as IVIncInsertPt.
  Value *StepV = Rewriter.expandCodeFor(IncAmount, Ty,
                                        Preheader->getTerminator());
  Instruction *IncV;
  if (isNegative) {
    IncV = BinaryOperator::CreateSub(PN, StepV, "lsr.iv.next",
                                     IVIncInsertPt);
  } else {
    IncV = BinaryOperator::CreateAdd(PN, StepV, "lsr.iv.next",
                                     IVIncInsertPt);
  }
  if (!isa<ConstantInt>(StepV)) ++NumVariable;

  PN->addIncoming(IncV, LatchBlock);

  ++NumInserted;
  return PN;
}

static void SortUsersToProcess(std::vector<BasedUser> &UsersToProcess) {
  // We want to emit code for users inside the loop first. To do this, we
  // rearrange BasedUser so that the entries at the end have
  // isUseOfPostIncrementedValue = false, because we pop off the end of the
  // vector (so we handle them first).
  std::partition(UsersToProcess.begin(), UsersToProcess.end(),
                 PartitionByIsUseOfPostIncrementedValue);

  // Sort this by base, so that things with the same base are handled
  // together. By partitioning first and stable-sorting later, we are
  // guaranteed that within each base we will pop off users from within the
  // loop before users outside of the loop with a particular base.
  //
  // We would like to use stable_sort here, but we can't. The problem is that
  // const SCEV *'s don't have a deterministic ordering with respect to each
  // other, so we don't have anything to do a '<' comparison on. Because we
  // think the number of uses is small, do a horrible bubble sort which just
  // relies on ==.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // Get a base value.
    const SCEV *Base = UsersToProcess[i].Base;

    // Compact everything with this base to be consecutive with this one.
    for (unsigned j = i+1; j != e; ++j) {
      if (UsersToProcess[j].Base == Base) {
        std::swap(UsersToProcess[i+1], UsersToProcess[j]);
        ++i;
      }
    }
  }
}
/// PrepareToStrengthReduceFully - Prepare to fully strength-reduce
/// UsersToProcess, meaning lowering addresses all the way down to direct
/// pointer arithmetic.
///
void
LoopStrengthReduce::PrepareToStrengthReduceFully(
                                        std::vector<BasedUser> &UsersToProcess,
                                        const SCEV *Stride,
                                        const SCEV *CommonExprs,
                                        const Loop *L,
                                        SCEVExpander &PreheaderRewriter) {
  DEBUG(errs() << "  Fully reducing all users\n");

  // Rewrite the UsersToProcess records, creating a separate PHI for each
  // unique Base value.
  Instruction *IVIncInsertPt = L->getLoopLatch()->getTerminator();
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
    // TODO: The uses are grouped by base, but not sorted. We arbitrarily
    // pick the first Imm value here to start with, and adjust it for the
    // other uses.
    const SCEV *Imm = UsersToProcess[i].Imm;
    const SCEV *Base = UsersToProcess[i].Base;
    const SCEV *Start = SE->getAddExpr(CommonExprs, Base, Imm);
    PHINode *Phi = InsertAffinePhi(Start, Stride, IVIncInsertPt, L,
                                   PreheaderRewriter);
    // Loop over all the users with the same base.
    do {
      UsersToProcess[i].Base = SE->getIntegerSCEV(0, Stride->getType());
      UsersToProcess[i].Imm = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
      UsersToProcess[i].Phi = Phi;
      assert(UsersToProcess[i].Imm->isLoopInvariant(L) &&
             "ShouldUseFullStrengthReductionMode should reject this!");
    } while (++i != e && Base == UsersToProcess[i].Base);
  }
}

/// FindIVIncInsertPt - Return the location to insert the increment
/// instruction. If the only use is a use of the postinc value (it must be
/// the loop termination condition), then insert the increment just before
/// the use.
static Instruction *FindIVIncInsertPt(std::vector<BasedUser> &UsersToProcess,
                                      const Loop *L) {
  if (UsersToProcess.size() == 1 &&
      UsersToProcess[0].isUseOfPostIncrementedValue &&
      L->contains(UsersToProcess[0].Inst->getParent()))
    return UsersToProcess[0].Inst;
  return L->getLoopLatch()->getTerminator();
}

/// PrepareToStrengthReduceWithNewPhi - Insert a new induction variable for the
/// given users to share.
///
void
LoopStrengthReduce::PrepareToStrengthReduceWithNewPhi(
                                        std::vector<BasedUser> &UsersToProcess,
                                        const SCEV *Stride,
                                        const SCEV *CommonExprs,
                                        Value *CommonBaseV,
                                        Instruction *IVIncInsertPt,
                                        const Loop *L,
                                        SCEVExpander &PreheaderRewriter) {
  DEBUG(errs() << "  Inserting new PHI:\n");

  PHINode *Phi = InsertAffinePhi(SE->getUnknown(CommonBaseV),
                                 Stride, IVIncInsertPt, L,
                                 PreheaderRewriter);

  // Remember this in case a later stride is a multiple of this.
  IVsByStride[Stride].addIV(Stride, CommonExprs, Phi);

  // All the users will share this new IV.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
    UsersToProcess[i].Phi = Phi;

  DEBUG(errs() << "    IV=");
  DEBUG(WriteAsOperand(errs(), Phi, /*PrintType=*/false));
  DEBUG(errs() << "\n");
}

/// PrepareToStrengthReduceFromSmallerStride - Prepare for the given users to
/// reuse an induction variable with a stride that is a factor of the current
/// induction variable.
///
void
LoopStrengthReduce::PrepareToStrengthReduceFromSmallerStride(
                                        std::vector<BasedUser> &UsersToProcess,
                                        Value *CommonBaseV,
                                        const IVExpr &ReuseIV,
                                        Instruction *PreInsertPt) {
  DEBUG(errs() << "  Rewriting in terms of existing IV of STRIDE "
               << *ReuseIV.Stride << " and BASE " << *ReuseIV.Base << "\n");

  // All the users will share the reused IV.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
    UsersToProcess[i].Phi = ReuseIV.PHI;

  Constant *C = dyn_cast<Constant>(CommonBaseV);
  if (C &&
      (!C->isNullValue() &&
       !fitsInAddressMode(SE->getUnknown(CommonBaseV), CommonBaseV->getType(),
                          TLI, false)))
    // We want the common base emitted into the preheader! This is just
    // using a cast as a copy, so BitCast (a no-op cast) is appropriate.
    CommonBaseV = new BitCastInst(CommonBaseV, CommonBaseV->getType(),
                                  "commonbase", PreInsertPt);
}
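// For example (illustrative): if a stride-4 IV already exists with a zero
// base and the current stride is 8, the users here can be rewritten in terms
// of the stride-4 PHI with a reuse factor of 2, avoiding a second PHI and
// increment inside the loop.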
    // using cast as a copy, so BitCast (no-op cast) is appropriate.
    CommonBaseV = new BitCastInst(CommonBaseV, CommonBaseV->getType(),
                                  "commonbase", PreInsertPt);
}

static bool IsImmFoldedIntoAddrMode(GlobalValue *GV, int64_t Offset,
                                    const Type *AccessTy,
                                    std::vector<BasedUser> &UsersToProcess,
                                    const TargetLowering *TLI) {
  SmallVector<Instruction*, 16> AddrModeInsts;
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    if (UsersToProcess[i].isUseOfPostIncrementedValue)
      continue;
    ExtAddrMode AddrMode =
      AddressingModeMatcher::Match(UsersToProcess[i].OperandValToReplace,
                                   AccessTy, UsersToProcess[i].Inst,
                                   AddrModeInsts, *TLI);
    if (GV && GV != AddrMode.BaseGV)
      return false;
    if (Offset && !AddrMode.BaseOffs)
      // FIXME: How do we accurately check whether the immediate offset is
      // actually folded?
      return false;
    AddrModeInsts.clear();
  }
  return true;
}

/// StrengthReduceStridedIVUsers - Strength reduce all of the users of a single
/// stride of IV. All of the users may have different starting values, and this
/// may not be the only stride.
void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV *const &Stride,
                                                      IVUsersOfOneStride &Uses,
                                                      Loop *L) {
  // If all the users are moved to another stride, then there is nothing to do.
  if (Uses.Users.empty())
    return;

  // Keep track of whether every use in UsersToProcess is an address. If they
  // all are, we may be able to rewrite the entire collection of them in terms
  // of a smaller-stride IV.
  bool AllUsesAreAddresses = true;

  // Keep track of whether every use of a single stride is outside the loop.
  // If so, we want to be more aggressive about reusing a smaller-stride IV;
  // a multiply outside the loop is better than another IV inside. Well,
  // usually.
  bool AllUsesAreOutsideLoop = true;

  // Transform our list of users and offsets to a bit more complex table. In
  // this new vector, each 'BasedUser' contains 'Base', the base of the strided
  // access, as well as the old information from Uses. We progressively move
  // information from the Base field to the Imm field, until we eventually
  // have the full access expression to rewrite the use.
  std::vector<BasedUser> UsersToProcess;
  const SCEV *CommonExprs = CollectIVUsers(Stride, Uses, L, AllUsesAreAddresses,
                                           AllUsesAreOutsideLoop,
                                           UsersToProcess);

  // Sort the UsersToProcess array so that users with common bases are
  // next to each other.
  SortUsersToProcess(UsersToProcess);

  // If we managed to find some expressions in common, we'll need to carry
  // their value in a register and add it in for each use. This will take up
  // a register operand, which potentially restricts what stride values are
  // valid.
  bool HaveCommonExprs = !CommonExprs->isZero();
  const Type *ReplacedTy = CommonExprs->getType();

  // If all uses are addresses, consider sinking the immediate part of the
  // common expression back into uses if they can fit in the immediate fields.
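  // A sketch of the idea: if CommonExprs is (%gv + 8) and every use is a
  // load or store, the target can often fold the 8 (and possibly %gv) into
  // each use's addressing mode -- e.g. an x86 "8(%reg)" operand -- so the
  // immediate need not be materialized in the common register.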
  if (TLI && HaveCommonExprs && AllUsesAreAddresses) {
    const SCEV *NewCommon = CommonExprs;
    const SCEV *Imm = SE->getIntegerSCEV(0, ReplacedTy);
    MoveImmediateValues(TLI, Type::getVoidTy(
                          L->getLoopPreheader()->getContext()),
                        NewCommon, Imm, true, L, SE);
    if (!Imm->isZero()) {
      bool DoSink = true;

      // If the immediate part of the common expression is a GV, check if it's
      // possible to fold it into the target addressing mode.
      GlobalValue *GV = 0;
      if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(Imm))
        GV = dyn_cast<GlobalValue>(SU->getValue());
      int64_t Offset = 0;
      if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Imm))
        Offset = SC->getValue()->getSExtValue();
      if (GV || Offset)
        // Pass VoidTy as the AccessTy to be conservative, because
        // there could be multiple access types among all the uses.
        DoSink = IsImmFoldedIntoAddrMode(GV, Offset,
                          Type::getVoidTy(L->getLoopPreheader()->getContext()),
                                         UsersToProcess, TLI);

      if (DoSink) {
        DEBUG(errs() << "  Sinking " << *Imm << " back down into uses\n");
        for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
          UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm, Imm);
        CommonExprs = NewCommon;
        HaveCommonExprs = !CommonExprs->isZero();
        ++NumImmSunk;
      }
    }
  }

  // Now that we know what we need to do, insert the PHI node itself.
  //
  DEBUG(errs() << "LSR: Examining IVs of TYPE " << *ReplacedTy << " of STRIDE "
               << *Stride << ":\n"
               << "  Common base: " << *CommonExprs << "\n");

  SCEVExpander Rewriter(*SE);
  SCEVExpander PreheaderRewriter(*SE);

  BasicBlock *Preheader = L->getLoopPreheader();
  Instruction *PreInsertPt = Preheader->getTerminator();
  BasicBlock *LatchBlock = L->getLoopLatch();
  Instruction *IVIncInsertPt = LatchBlock->getTerminator();

  Value *CommonBaseV = Constant::getNullValue(ReplacedTy);

  const SCEV *RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy);
  IVExpr ReuseIV(SE->getIntegerSCEV(0,
                                    Type::getInt32Ty(Preheader->getContext())),
                 SE->getIntegerSCEV(0,
                                    Type::getInt32Ty(Preheader->getContext())),
                 0);

  // Choose a strength-reduction strategy and prepare for it by creating
  // the necessary PHIs and adjusting the bookkeeping.
  if (ShouldUseFullStrengthReductionMode(UsersToProcess, L,
                                         AllUsesAreAddresses, Stride)) {
    PrepareToStrengthReduceFully(UsersToProcess, Stride, CommonExprs, L,
                                 PreheaderRewriter);
  } else {
    // Emit the initial base value into the loop preheader.
    CommonBaseV = PreheaderRewriter.expandCodeFor(CommonExprs, ReplacedTy,
                                                  PreInsertPt);

    // If all uses are addresses, check if it is possible to reuse an IV. The
    // new IV must have a stride that is a multiple of the old stride; the
    // multiple must be a number that can be encoded in the scale field of the
    // target addressing mode; and we must have a valid instruction after this
    // substitution, including the immediate field, if any.
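    // A sketch of the reuse this checks for: users of stride 8 can be
    // rewritten against an existing stride-4 IV with RewriteFactor 2 when
    // the target addressing mode supports a scale of 2 (e.g. x86's
    // base + 2*index), saving a separate IV.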
    RewriteFactor = CheckForIVReuse(HaveCommonExprs, AllUsesAreAddresses,
                                    AllUsesAreOutsideLoop,
                                    Stride, ReuseIV, ReplacedTy,
                                    UsersToProcess);
    if (!RewriteFactor->isZero())
      PrepareToStrengthReduceFromSmallerStride(UsersToProcess, CommonBaseV,
                                               ReuseIV, PreInsertPt);
    else {
      IVIncInsertPt = FindIVIncInsertPt(UsersToProcess, L);
      PrepareToStrengthReduceWithNewPhi(UsersToProcess, Stride, CommonExprs,
                                        CommonBaseV, IVIncInsertPt,
                                        L, PreheaderRewriter);
    }
  }

  // Process all the users now, replacing their strided uses with
  // strength-reduced forms. This outer loop handles all bases, the inner
  // loop handles all users of a particular base.
  while (!UsersToProcess.empty()) {
    const SCEV *Base = UsersToProcess.back().Base;
    Instruction *Inst = UsersToProcess.back().Inst;

    // Emit the code for Base into the preheader.
    Value *BaseV = 0;
    if (!Base->isZero()) {
      BaseV = PreheaderRewriter.expandCodeFor(Base, 0, PreInsertPt);

      DEBUG(errs() << "  INSERTING code for BASE = " << *Base << ":");
      if (BaseV->hasName())
        DEBUG(errs() << " Result value name = %" << BaseV->getName());
      DEBUG(errs() << "\n");

      // If BaseV is a non-zero constant, make sure that it gets inserted into
      // the preheader, instead of being forward substituted into the uses. We
      // do this by forcing a BitCast (noop cast) to be inserted into the
      // preheader in this case.
      if (!fitsInAddressMode(Base, getAccessType(Inst), TLI, false) &&
          isa<Constant>(BaseV)) {
        // We want this constant emitted into the preheader! This is just
        // using cast as a copy, so BitCast (no-op cast) is appropriate.
        BaseV = new BitCastInst(BaseV, BaseV->getType(), "preheaderinsert",
                                PreInsertPt);
      }
    }

    // Emit the code to add the immediate offset to the Phi value, just before
    // the instructions that we identified as using this stride and base.
    do {
      // FIXME: Use emitted users to emit other users.
      BasedUser &User = UsersToProcess.back();

      DEBUG(errs() << "    Examining ");
      if (User.isUseOfPostIncrementedValue)
        DEBUG(errs() << "postinc");
      else
        DEBUG(errs() << "preinc");
      DEBUG(errs() << " use ");
      DEBUG(WriteAsOperand(errs(), UsersToProcess.back().OperandValToReplace,
                           /*PrintType=*/false));
      DEBUG(errs() << " in Inst: " << *User.Inst);

      // If this instruction wants to use the post-incremented value, move it
      // after the post-inc and use its value instead of the PHI.
      Value *RewriteOp = User.Phi;
      if (User.isUseOfPostIncrementedValue) {
        RewriteOp = User.Phi->getIncomingValueForBlock(LatchBlock);
        // If this user is in the loop, make sure it is the last thing in the
        // loop to ensure it is dominated by the increment. In case it's the
        // only use of the iv, the increment instruction is already before the
        // use.
        if (L->contains(User.Inst->getParent()) && User.Inst != IVIncInsertPt)
          User.Inst->moveBefore(IVIncInsertPt);
      }

      const SCEV *RewriteExpr = SE->getUnknown(RewriteOp);

      if (SE->getEffectiveSCEVType(RewriteOp->getType()) !=
          SE->getEffectiveSCEVType(ReplacedTy)) {
        assert(SE->getTypeSizeInBits(RewriteOp->getType()) >
               SE->getTypeSizeInBits(ReplacedTy) &&
               "Unexpected widening cast!");
        RewriteExpr = SE->getTruncateExpr(RewriteExpr, ReplacedTy);
      }

      // If we had to insert new instructions for RewriteOp, we have to
      // consider that they may not have been able to end up immediately
      // next to RewriteOp, because non-PHI instructions may never precede
      // PHI instructions in a block. In this case, remember where the last
      // instruction was inserted so that if we're replacing a different
      // PHI node, we can use the later point to expand the final
      // RewriteExpr.
      Instruction *NewBasePt = dyn_cast<Instruction>(RewriteOp);
      if (RewriteOp == User.Phi) NewBasePt = 0;

      // Clear the SCEVExpander's expression map so that we are guaranteed
      // to have the code emitted where we expect it.
      Rewriter.clear();

      // If we are reusing the iv, then it must be multiplied by a constant
      // factor to take advantage of the addressing mode scale component.
      if (!RewriteFactor->isZero()) {
        // If we're reusing an IV with a nonzero base (currently this happens
        // only when all reuses are outside the loop), subtract that base here.
        // The base has been used to initialize the PHI node but we don't want
        // it here.
        if (!ReuseIV.Base->isZero()) {
          const SCEV *typedBase = ReuseIV.Base;
          if (SE->getEffectiveSCEVType(RewriteExpr->getType()) !=
              SE->getEffectiveSCEVType(ReuseIV.Base->getType())) {
            // It's possible the original IV is a larger type than the new IV,
            // in which case we have to truncate the Base. We checked in
            // RequiresTypeConversion that this is valid.
            assert(SE->getTypeSizeInBits(RewriteExpr->getType()) <
                   SE->getTypeSizeInBits(ReuseIV.Base->getType()) &&
                   "Unexpected lengthening conversion!");
            typedBase = SE->getTruncateExpr(ReuseIV.Base,
                                            RewriteExpr->getType());
          }
          RewriteExpr = SE->getMinusSCEV(RewriteExpr, typedBase);
        }

        // Multiply old variable, with base removed, by new scale factor.
        RewriteExpr = SE->getMulExpr(RewriteFactor,
                                     RewriteExpr);

        // The common base is emitted in the loop preheader. But since we
        // are reusing an IV, it has not been used to initialize the PHI node.
        // Add it to the expression used to rewrite the uses.
        // When this use is outside the loop, we earlier subtracted the
        // common base, and are adding it back here. Use the same expression
        // as before, rather than CommonBaseV, so DAGCombiner will zap it.
        if (!CommonExprs->isZero()) {
          if (L->contains(User.Inst->getParent()))
            RewriteExpr = SE->getAddExpr(RewriteExpr,
                                         SE->getUnknown(CommonBaseV));
          else
            RewriteExpr = SE->getAddExpr(RewriteExpr, CommonExprs);
        }
      }

      // Now that we know what we need to do, insert code before User for the
      // immediate and any loop-variant expressions.
      if (BaseV)
        // Add BaseV to the PHI value if needed.
        RewriteExpr = SE->getAddExpr(RewriteExpr, SE->getUnknown(BaseV));

      User.RewriteInstructionToUseNewBase(RewriteExpr, NewBasePt,
                                          Rewriter, L, this, *LI,
                                          DeadInsts);

      // Mark old value we replaced as possibly dead, so that it is eliminated
      // if we just replaced the last use of that value.
      DeadInsts.push_back(User.OperandValToReplace);

      UsersToProcess.pop_back();
      ++NumReduced;

      // If there are any more users to process with the same base, process
      // them now. We sorted by base above, so we just have to check the last
      // elt.
    } while (!UsersToProcess.empty() && UsersToProcess.back().Base == Base);
    // TODO: Next, find out which base index is the most common, pull it out.
  }

  // IMPORTANT TODO: Figure out how to partition the IV's with this stride, but
  // different starting values, into different PHIs.
}

/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
/// set the IV user and stride information and return true, otherwise return
/// false.
bool LoopStrengthReduce::FindIVUserForCond(ICmpInst *Cond,
                                           IVStrideUse *&CondUse,
                                           const SCEV *const * &CondStride) {
  for (unsigned Stride = 0, e = IU->StrideOrder.size();
       Stride != e && !CondUse; ++Stride) {
    std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
      IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
    assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");

    for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
         E = SI->second->Users.end(); UI != E; ++UI)
      if (UI->getUser() == Cond) {
        // NOTE: we could handle setcc instructions with multiple uses here,
        // but InstCombine does it as well for simple uses; it's not clear
        // that it occurs often enough in real life to be worth handling.
        CondUse = UI;
        CondStride = &SI->first;
        return true;
      }
  }
  return false;
}

namespace {
  // Constant strides come first, sorted by their absolute values.
  // If absolute values are the same, then positive strides come first.
  // e.g.
  // 4, -1, X, 1, 2 ==> 1, -1, 2, 4, X
  struct StrideCompare {
    const ScalarEvolution *SE;
    explicit StrideCompare(const ScalarEvolution *se) : SE(se) {}

    bool operator()(const SCEV *const &LHS, const SCEV *const &RHS) {
      const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS);
      const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
      if (LHSC && RHSC) {
        int64_t LV = LHSC->getValue()->getSExtValue();
        int64_t RV = RHSC->getValue()->getSExtValue();
        uint64_t ALV = (LV < 0) ? -LV : LV;
        uint64_t ARV = (RV < 0) ? -RV : RV;
        if (ALV == ARV) {
          if (LV != RV)
            return LV > RV;
        } else {
          return ALV < ARV;
        }

        // If it's the same value but different type, sort by bit width so
        // that we emit larger induction variables before smaller
        // ones, letting the smaller be re-written in terms of larger ones.
        return SE->getTypeSizeInBits(RHS->getType()) <
               SE->getTypeSizeInBits(LHS->getType());
      }
      return LHSC && !RHSC;
    }
  };
}

/// ChangeCompareStride - If a loop termination compare instruction is the only
/// use of its stride, and the comparison is against a constant value, try to
/// eliminate the stride by moving the compare instruction to another stride
/// and changing its constant operand accordingly. e.g.
///
/// loop:
/// ...
/// v1 = v1 + 3
/// v2 = v2 + 1
/// if (v2 < 10) goto loop
/// =>
/// loop:
/// ...
/// v1 = v1 + 3
/// if (v1 < 30) goto loop
ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
                                                IVStrideUse* &CondUse,
                                                const SCEV *const* &CondStride) {
  // If there's only one stride in the loop, there's nothing to do here.
  if (IU->StrideOrder.size() < 2)
    return Cond;
  // If there are other users of the condition's stride, don't bother
  // trying to change the condition because the stride will still
  // remain.
  std::map<const SCEV *, IVUsersOfOneStride *>::iterator I =
    IU->IVUsesByStride.find(*CondStride);
  if (I == IU->IVUsesByStride.end() ||
      I->second->Users.size() != 1)
    return Cond;
  // Only handle constant strides for now.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride);
  if (!SC) return Cond;

  ICmpInst::Predicate Predicate = Cond->getPredicate();
  int64_t CmpSSInt = SC->getValue()->getSExtValue();
  unsigned BitWidth = SE->getTypeSizeInBits((*CondStride)->getType());
  uint64_t SignBit = 1ULL << (BitWidth-1);
  const Type *CmpTy = Cond->getOperand(0)->getType();
  const Type *NewCmpTy = NULL;
  unsigned TyBits = SE->getTypeSizeInBits(CmpTy);
  unsigned NewTyBits = 0;
  const SCEV **NewStride = NULL;
  Value *NewCmpLHS = NULL;
  Value *NewCmpRHS = NULL;
  int64_t Scale = 1;
  const SCEV *NewOffset = SE->getIntegerSCEV(0, CmpTy);

  if (ConstantInt *C = dyn_cast<ConstantInt>(Cond->getOperand(1))) {
    int64_t CmpVal = C->getValue().getSExtValue();

    // Check the signs of the stride constant and the comparison constant to
    // detect overflow.
    if ((CmpVal & SignBit) != (CmpSSInt & SignBit))
      return Cond;

    // Look for a suitable stride / iv as replacement.
    for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
      std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
        IU->IVUsesByStride.find(IU->StrideOrder[i]);
      if (!isa<SCEVConstant>(SI->first))
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SSInt == CmpSSInt ||
          abs64(SSInt) < abs64(CmpSSInt) ||
          (SSInt % CmpSSInt) != 0)
        continue;

      Scale = SSInt / CmpSSInt;
      int64_t NewCmpVal = CmpVal * Scale;
      APInt Mul = APInt(BitWidth*2, CmpVal, true);
      Mul = Mul * APInt(BitWidth*2, Scale, true);
      // Check for overflow.
      if (!Mul.isSignedIntN(BitWidth))
        continue;
      // Check for overflow in the stride's type too.
      if (!Mul.isSignedIntN(SE->getTypeSizeInBits(SI->first->getType())))
        continue;

      // Watch out for overflow.
      if (ICmpInst::isSignedPredicate(Predicate) &&
          (CmpVal & SignBit) != (NewCmpVal & SignBit))
        continue;

      if (NewCmpVal == CmpVal)
        continue;
      // Pick the best iv to use, trying to avoid a cast.
      NewCmpLHS = NULL;
      for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
           E = SI->second->Users.end(); UI != E; ++UI) {
        Value *Op = UI->getOperandValToReplace();

        // If the IVStrideUse implies a cast, check for an actual cast which
        // can be used to find the original IV expression.
        if (SE->getEffectiveSCEVType(Op->getType()) !=
            SE->getEffectiveSCEVType(SI->first->getType())) {
          CastInst *CI = dyn_cast<CastInst>(Op);
          // If it's not a simple cast, it's complicated.
          if (!CI)
            continue;
          // If it's a cast from a type other than the stride type,
          // it's complicated.
          if (CI->getOperand(0)->getType() != SI->first->getType())
            continue;
          // Ok, we found the IV expression in the stride's type.
          Op = CI->getOperand(0);
        }

        NewCmpLHS = Op;
        if (NewCmpLHS->getType() == CmpTy)
          break;
      }
      if (!NewCmpLHS)
        continue;

      NewCmpTy = NewCmpLHS->getType();
      NewTyBits = SE->getTypeSizeInBits(NewCmpTy);
      const Type *NewCmpIntTy = IntegerType::get(Cond->getContext(), NewTyBits);
      if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
        // Check if it is possible to rewrite it using
        // an iv / stride of a smaller integer type.
        unsigned Bits = NewTyBits;
        if (ICmpInst::isSignedPredicate(Predicate))
          --Bits;
        uint64_t Mask = (1ULL << Bits) - 1;
        if (((uint64_t)NewCmpVal & Mask) != (uint64_t)NewCmpVal)
          continue;
      }

      // Don't rewrite if the use offset is non-constant and the new type is
      // different from the old one.
      // FIXME: too conservative?
      if (NewTyBits != TyBits && !isa<SCEVConstant>(CondUse->getOffset()))
        continue;

      bool AllUsesAreAddresses = true;
      bool AllUsesAreOutsideLoop = true;
      std::vector<BasedUser> UsersToProcess;
      const SCEV *CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
                                               AllUsesAreAddresses,
                                               AllUsesAreOutsideLoop,
                                               UsersToProcess);
      // Avoid rewriting the compare instruction with an iv of new stride
      // if it's likely the new stride uses will be rewritten using the
      // stride of the compare instruction.
      if (AllUsesAreAddresses &&
          ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess))
        continue;

      // Avoid rewriting the compare instruction with an iv which has
      // implicit extension or truncation built into it.
      // TODO: This is over-conservative.
      if (SE->getTypeSizeInBits(CondUse->getOffset()->getType()) != TyBits)
        continue;

      // If scale is negative, use swapped predicate unless it's testing
      // for equality.
      if (Scale < 0 && !Cond->isEquality())
        Predicate = ICmpInst::getSwappedPredicate(Predicate);

      NewStride = &IU->StrideOrder[i];
      if (!isa<PointerType>(NewCmpTy))
        NewCmpRHS = ConstantInt::get(NewCmpTy, NewCmpVal);
      else {
        Constant *CI = ConstantInt::get(NewCmpIntTy, NewCmpVal);
        NewCmpRHS = ConstantExpr::getIntToPtr(CI, NewCmpTy);
      }
      NewOffset = TyBits == NewTyBits
        ? SE->getMulExpr(CondUse->getOffset(),
                         SE->getConstant(CmpTy, Scale))
        : SE->getConstant(NewCmpIntTy,
            cast<SCEVConstant>(CondUse->getOffset())->getValue()
              ->getSExtValue()*Scale);
      break;
    }
  }

  // Forgo this transformation if the increment happens to be unfortunately
  // positioned after the condition, and the condition has multiple uses which
  // prevent it from being moved immediately before the branch. See
  // test/Transforms/LoopStrengthReduce/change-compare-stride-trickiness-*.ll
  // for an example of this situation.
  if (!Cond->hasOneUse()) {
    for (BasicBlock::iterator I = Cond, E = Cond->getParent()->end();
         I != E; ++I)
      if (I == NewCmpLHS)
        return Cond;
  }

  if (NewCmpRHS) {
    // Create a new compare instruction using new stride / iv.
    ICmpInst *OldCond = Cond;
    // Insert new compare instruction.
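    // Note: passing OldCond as the first constructor argument should insert
    // the new compare immediately before the old one, so after the RAUW below
    // the branch sees an equivalent compare in the same position.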
    Cond = new ICmpInst(OldCond, Predicate, NewCmpLHS, NewCmpRHS,
                        L->getHeader()->getName() + ".termcond");

    // Remove the old compare instruction. The old indvar is probably dead too.
    DeadInsts.push_back(CondUse->getOperandValToReplace());
    OldCond->replaceAllUsesWith(Cond);
    OldCond->eraseFromParent();

    IU->IVUsesByStride[*NewStride]->addUser(NewOffset, Cond, NewCmpLHS);
    CondUse = &IU->IVUsesByStride[*NewStride]->Users.back();
    CondStride = NewStride;
    ++NumEliminated;
    Changed = true;
  }

  return Cond;
}

/// OptimizeMax - Rewrite the loop's terminating condition if it uses
/// a max computation.
///
/// This is a narrow solution to a specific, but acute, problem. For loops
/// like this:
///
///   i = 0;
///   do {
///     p[i] = 0.0;
///   } while (++i < n);
///
/// the trip count isn't just 'n', because 'n' might not be positive. And
/// unfortunately this can come up even for loops where the user didn't use
/// a C do-while loop. For example, seemingly well-behaved top-test loops
/// will commonly be lowered like this:
///
///   if (n > 0) {
///     i = 0;
///     do {
///       p[i] = 0.0;
///     } while (++i < n);
///   }
///
/// and then it's possible for subsequent optimization to obscure the if
/// test in such a way that indvars can't find it.
///
/// When indvars can't find the if test in loops like this, it creates a
/// max expression, which allows it to give the loop a canonical
/// induction variable:
///
///   i = 0;
///   max = n < 1 ? 1 : n;
///   do {
///     p[i] = 0.0;
///   } while (++i != max);
///
/// Canonical induction variables are necessary because the loop passes
/// are designed around them. The most obvious example of this is the
/// LoopInfo analysis, which doesn't remember trip count values. It
/// expects to be able to rediscover the trip count each time it is
/// needed, and it does this using a simple analysis that only succeeds if
/// the loop has a canonical induction variable.
///
/// However, when it comes time to generate code, the maximum operation
/// can be quite costly, especially if it's inside of an outer loop.
///
/// This function solves this problem by detecting loops of this type and
/// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
/// the instructions for the maximum computation.
///
ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond,
                                          IVStrideUse* &CondUse) {
  // Check that the loop matches the pattern we're looking for.
  if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
      Cond->getPredicate() != CmpInst::ICMP_NE)
    return Cond;

  SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
  if (!Sel || !Sel->hasOneUse()) return Cond;

  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return Cond;
  const SCEV *One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());

  // Add one to the backedge-taken count to get the trip count.
  const SCEV *IterationCount = SE->getAddExpr(BackedgeTakenCount, One);

  // Check for a max calculation that matches the pattern.
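  // For the example above, the backedge-taken count is smax(1, n) - 1, so
  // adding one back gives an IterationCount of smax(1, n) -- the SCEVSMaxExpr
  // matched below. (A sketch of the signed case; umax is analogous.)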
  if (!isa<SCEVSMaxExpr>(IterationCount) && !isa<SCEVUMaxExpr>(IterationCount))
    return Cond;
  const SCEVNAryExpr *Max = cast<SCEVNAryExpr>(IterationCount);
  if (Max != SE->getSCEV(Sel)) return Cond;

  // To handle a max with more than two operands, this optimization would
  // require additional checking and setup.
  if (Max->getNumOperands() != 2)
    return Cond;

  const SCEV *MaxLHS = Max->getOperand(0);
  const SCEV *MaxRHS = Max->getOperand(1);
  if (!MaxLHS || MaxLHS != One) return Cond;

  // Check the relevant induction variable for conformance to
  // the pattern.
  const SCEV *IV = SE->getSCEV(Cond->getOperand(0));
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
  if (!AR || !AR->isAffine() ||
      AR->getStart() != One ||
      AR->getStepRecurrence(*SE) != One)
    return Cond;

  assert(AR->getLoop() == L &&
         "Loop condition operand is an addrec in a different loop!");

  // Check the right operand of the select, and remember it, as it will
  // be used in the new comparison instruction.
  Value *NewRHS = 0;
  if (SE->getSCEV(Sel->getOperand(1)) == MaxRHS)
    NewRHS = Sel->getOperand(1);
  else if (SE->getSCEV(Sel->getOperand(2)) == MaxRHS)
    NewRHS = Sel->getOperand(2);
  if (!NewRHS) return Cond;

  // Determine the new comparison opcode. It may be signed or unsigned,
  // and the original comparison may be either equality or inequality.
  CmpInst::Predicate Pred =
    isa<SCEVSMaxExpr>(Max) ? CmpInst::ICMP_SLT : CmpInst::ICMP_ULT;
  if (Cond->getPredicate() == CmpInst::ICMP_EQ)
    Pred = CmpInst::getInversePredicate(Pred);

  // Ok, everything looks ok to change the condition into an SLT or SGE and
  // delete the max calculation.
  ICmpInst *NewCond =
    new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");

  // Delete the max calculation instructions.
  Cond->replaceAllUsesWith(NewCond);
  CondUse->setUser(NewCond);
  Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
  Cond->eraseFromParent();
  Sel->eraseFromParent();
  if (Cmp->use_empty())
    Cmp->eraseFromParent();
  return NewCond;
}

/// OptimizeShadowIV - If IV is used in an int-to-float cast
/// inside the loop then try to eliminate the cast operation.
void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {

  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;

  for (unsigned Stride = 0, e = IU->StrideOrder.size(); Stride != e;
       ++Stride) {
    std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
      IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
    assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
    if (!isa<SCEVConstant>(SI->first))
      continue;

    for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
         E = SI->second->Users.end(); UI != E; /* empty */) {
      ilist<IVStrideUse>::iterator CandidateUI = UI;
      ++UI;
      Instruction *ShadowUse = CandidateUI->getUser();
      const Type *DestTy = NULL;

      /* If the shadow use is an int->float cast, then insert a second IV
         to eliminate this cast.

           for (unsigned i = 0; i < n; ++i)
             foo((double)i);

         is transformed into

           double d = 0.0;
           for (unsigned i = 0; i < n; ++i, ++d)
             foo(d);
      */
      if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser()))
        DestTy = UCast->getDestTy();
      else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser()))
        DestTy = SCast->getDestTy();
      if (!DestTy) continue;

      if (TLI) {
        // If the target does not support DestTy natively then do not apply
        // this transformation.
        EVT DVT = TLI->getValueType(DestTy);
        if (!TLI->isTypeLegal(DVT)) continue;
      }

      PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
      if (!PH) continue;
      if (PH->getNumIncomingValues() != 2) continue;

      const Type *SrcTy = PH->getType();
      int Mantissa = DestTy->getFPMantissaWidth();
      if (Mantissa == -1) continue;
      if ((int)SE->getTypeSizeInBits(SrcTy) > Mantissa)
        continue;

      unsigned Entry, Latch;
      if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
        Entry = 0;
        Latch = 1;
      } else {
        Entry = 1;
        Latch = 0;
      }

      ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
      if (!Init) continue;
      Constant *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());

      BinaryOperator *Incr =
        dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
      if (!Incr) continue;
      if (Incr->getOpcode() != Instruction::Add
          && Incr->getOpcode() != Instruction::Sub)
        continue;

      /* Initialize new IV, double d = 0.0 in above example. */
      ConstantInt *C = NULL;
      if (Incr->getOperand(0) == PH)
        C = dyn_cast<ConstantInt>(Incr->getOperand(1));
      else if (Incr->getOperand(1) == PH)
        C = dyn_cast<ConstantInt>(Incr->getOperand(0));
      else
        continue;

      if (!C) continue;

      /* Add new PHINode. */
      PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);

      /* Create new increment. '++d' in above example. */
      Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
      BinaryOperator *NewIncr =
        BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
                                 Instruction::FAdd : Instruction::FSub,
                               NewPH, CFP, "IV.S.next.", Incr);

      NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
      NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));

      /* Remove cast operation. */
      ShadowUse->replaceAllUsesWith(NewPH);
      ShadowUse->eraseFromParent();
      NumShadow++;
      break;
    }
  }
}

/// OptimizeIndvars - Now that IVUsesByStride is set up with all of the indvar
/// uses in the loop, look to see if we can eliminate some, in favor of using
/// common indvars for the different uses.
void LoopStrengthReduce::OptimizeIndvars(Loop *L) {
  // TODO: implement optzns here.

  OptimizeShadowIV(L);
}

/// OptimizeLoopTermCond - Change the loop's terminating condition to use the
/// postinc iv when possible.
void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
  // Finally, get the terminating condition for the loop if possible. If we
  // can, we want to change it to use a post-incremented version of its
  // induction variable, to allow coalescing the live ranges for the IV into
  // one register value.
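  // A sketch of the payoff: if the latch computes i.next = i + 1 and the
  // exit test compares i, rewriting the test in terms of i.next (with the
  // use's offset reduced by one stride, as done at the end of this function)
  // lets i's live range end at the increment, so i and i.next no longer need
  // separate registers.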
  BasicBlock *LatchBlock = L->getLoopLatch();
  BasicBlock *ExitingBlock = L->getExitingBlock();
  LLVMContext &Context = LatchBlock->getContext();

  if (!ExitingBlock)
    // Multiple exits, just look at the exit in the latch block if there is one.
    ExitingBlock = LatchBlock;
  BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
  if (!TermBr)
    return;
  if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
    return;

  // Search IVUsesByStride to find Cond's IVUse if there is one.
  IVStrideUse *CondUse = 0;
  const SCEV *const *CondStride = 0;
  ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
  if (!FindIVUserForCond(Cond, CondUse, CondStride))
    return; // setcc doesn't use the IV.

  if (ExitingBlock != LatchBlock) {
    if (!Cond->hasOneUse())
      // See below, we don't want the condition to be cloned.
      return;

    // If exiting block is the latch block, we know it's safe and profitable to
    // transform the icmp to use post-inc iv. Otherwise do so only if it would
    // not reuse another iv and its iv would be reused by other uses. We are
    // optimizing for the case where the icmp is the only use of the iv.
    IVUsersOfOneStride &StrideUses = *IU->IVUsesByStride[*CondStride];
    for (ilist<IVStrideUse>::iterator I = StrideUses.Users.begin(),
         E = StrideUses.Users.end(); I != E; ++I) {
      if (I->getUser() == Cond)
        continue;
      if (!I->isUseOfPostIncrementedValue())
        return;
    }

    // FIXME: This is expensive, and worse still ChangeCompareStride does a
    // similar check. Can we perform all the icmp related transformations after
    // StrengthReduceStridedIVUsers?
    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride)) {
      int64_t SInt = SC->getValue()->getSExtValue();
      for (unsigned NewStride = 0, ee = IU->StrideOrder.size(); NewStride != ee;
           ++NewStride) {
        std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
          IU->IVUsesByStride.find(IU->StrideOrder[NewStride]);
        if (!isa<SCEVConstant>(SI->first) || SI->first == *CondStride)
          continue;
        int64_t SSInt =
          cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
        if (SSInt == SInt)
          return; // This can definitely be reused.
        if (unsigned(abs64(SSInt)) < SInt || (SSInt % SInt) != 0)
          continue;
        int64_t Scale = SSInt / SInt;
        bool AllUsesAreAddresses = true;
        bool AllUsesAreOutsideLoop = true;
        std::vector<BasedUser> UsersToProcess;
        const SCEV *CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
                                                 AllUsesAreAddresses,
                                                 AllUsesAreOutsideLoop,
                                                 UsersToProcess);
        // Avoid rewriting the compare instruction with an iv of new stride
        // if it's likely the new stride uses will be rewritten using the
        // stride of the compare instruction.
        if (AllUsesAreAddresses &&
            ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess))
          return;
      }
    }

    StrideNoReuse.insert(*CondStride);
  }

  // If the trip count is computed in terms of a max (due to ScalarEvolution
  // being unable to find a sufficient guard, for example), change the loop
  // comparison to use SLT or ULT instead of NE.
  Cond = OptimizeMax(L, Cond, CondUse);

  // If possible, change stride and operands of the compare instruction to
  // eliminate one stride.
  if (ExitingBlock == LatchBlock)
    Cond = ChangeCompareStride(L, Cond, CondUse, CondStride);

  // It's possible for the setcc instruction to be anywhere in the loop, and
  // possible for it to have multiple users. If it is not immediately before
  // the latch block branch, move it.
  if (&*++BasicBlock::iterator(Cond) != (Instruction*)TermBr) {
    if (Cond->hasOneUse()) { // Condition has a single use, just move it.
      Cond->moveBefore(TermBr);
    } else {
      // Otherwise, clone the terminating condition and insert it at the end
      // of the loop.
      Cond = cast<ICmpInst>(Cond->clone(Context));
      Cond->setName(L->getHeader()->getName() + ".termcond");
      LatchBlock->getInstList().insert(TermBr, Cond);

      // Clone the IVUse, as the old use still exists!
      IU->IVUsesByStride[*CondStride]->addUser(CondUse->getOffset(), Cond,
                                          CondUse->getOperandValToReplace());
      CondUse = &IU->IVUsesByStride[*CondStride]->Users.back();
    }
  }

  // If we get to here, we know that we can transform the setcc instruction to
  // use the post-incremented version of the IV, allowing us to coalesce the
  // live ranges for the IV correctly.
  CondUse->setOffset(SE->getMinusSCEV(CondUse->getOffset(), *CondStride));
  CondUse->setIsUseOfPostIncrementedValue(true);
  Changed = true;

  ++NumLoopCond;
}

/// OptimizeLoopCountIV - If, after all sharing of IVs, the IV used for
/// deciding when to exit the loop is used only for that purpose, try to
/// rearrange things so it counts down to a test against zero.
void LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) {

  // If the number of times the loop is executed isn't computable, give up.
  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;

  // Get the terminating condition for the loop if possible (this isn't
  // necessarily in the latch, or a block that's a predecessor of the header).
  if (!L->getExitBlock())
    return; // More than one loop exit block.

  // Okay, there is one exit block. Try to find the condition that causes the
  // loop to be exited.
  BasicBlock *ExitingBlock = L->getExitingBlock();
  if (!ExitingBlock)
    return; // More than one block exiting!

  // Okay, we've computed the exiting block. See what condition causes us to
  // exit.
  //
  // FIXME: we should be able to handle switch instructions (with a single
  // exit).
  BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
  if (TermBr == 0) return;
  assert(TermBr->isConditional() && "If unconditional, it can't be in loop!");
  if (!isa<ICmpInst>(TermBr->getCondition()))
    return;
  ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());

  // Handle only tests for equality for the moment, and only stride 1.
  if (Cond->getPredicate() != CmpInst::ICMP_EQ)
    return;
  const SCEV *IV = SE->getSCEV(Cond->getOperand(0));
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
  const SCEV *One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());
  if (!AR || !AR->isAffine() || AR->getStepRecurrence(*SE) != One)
    return;
  // If the RHS of the comparison is defined inside the loop, the rewrite
  // cannot be done.
  if (Instruction *CR = dyn_cast<Instruction>(Cond->getOperand(1)))
    if (L->contains(CR->getParent()))
      return;

  // Make sure the IV is only used for counting. Value may be preinc or
  // postinc; 2 uses in either case.
  if (!Cond->getOperand(0)->hasNUses(2))
    return;
  PHINode *phi = dyn_cast<PHINode>(Cond->getOperand(0));
  Instruction *incr;
  if (phi && phi->getParent() == L->getHeader()) {
    // Value tested is preinc. Find the increment.
    // A CmpInst is not a BinaryOperator; we depend on this.
    Instruction::use_iterator UI = phi->use_begin();
    incr = dyn_cast<BinaryOperator>(UI);
    if (!incr)
      incr = dyn_cast<BinaryOperator>(++UI);
    // 1 use for postinc value, the phi. Unnecessarily conservative?
    if (!incr || !incr->hasOneUse() || incr->getOpcode() != Instruction::Add)
      return;
  } else {
    // Value tested is postinc. Find the phi node.
    incr = dyn_cast<BinaryOperator>(Cond->getOperand(0));
    if (!incr || incr->getOpcode() != Instruction::Add)
      return;

    Instruction::use_iterator UI = Cond->getOperand(0)->use_begin();
    phi = dyn_cast<PHINode>(UI);
    if (!phi)
      phi = dyn_cast<PHINode>(++UI);
    // 1 use for preinc value, the increment.
    if (!phi || phi->getParent() != L->getHeader() || !phi->hasOneUse())
      return;
  }

  // Replace the increment with a decrement.
  BinaryOperator *decr =
    BinaryOperator::Create(Instruction::Sub, incr->getOperand(0),
                           incr->getOperand(1), "tmp", incr);
  incr->replaceAllUsesWith(decr);
  incr->eraseFromParent();

  // Substitute endval-startval for the original startval, and 0 for the
  // original endval. Since we're only testing for equality this is OK even
  // if the computation wraps around.
  BasicBlock *Preheader = L->getLoopPreheader();
  Instruction *PreInsertPt = Preheader->getTerminator();
  int inBlock = L->contains(phi->getIncomingBlock(0)) ? 1 : 0;
  Value *startVal = phi->getIncomingValue(inBlock);
  Value *endVal = Cond->getOperand(1);
  // FIXME: check for the case where both are constant.
  Constant *Zero = ConstantInt::get(Cond->getOperand(1)->getType(), 0);
  BinaryOperator *NewStartVal =
    BinaryOperator::Create(Instruction::Sub, endVal, startVal,
                           "tmp", PreInsertPt);
  phi->setIncomingValue(inBlock, NewStartVal);
  Cond->setOperand(1, Zero);

  Changed = true;
}

bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) {

  IU = &getAnalysis<IVUsers>();
  LI = &getAnalysis<LoopInfo>();
  DT = &getAnalysis<DominatorTree>();
  SE = &getAnalysis<ScalarEvolution>();
  Changed = false;

  if (!IU->IVUsesByStride.empty()) {
    DEBUG(errs() << "\nLSR on \"" << L->getHeader()->getParent()->getName()
                 << "\" ";
          L->dump());

    // Sort the StrideOrder so we process larger strides first.
    std::stable_sort(IU->StrideOrder.begin(), IU->StrideOrder.end(),
                     StrideCompare(SE));

    // Optimize induction variables. Some indvar uses can be transformed to use
    // strides that will be needed for other purposes. A common example of this
    // is the exit test for the loop, which can often be rewritten to use the
    // computation of some other indvar to decide when to terminate the loop.
    OptimizeIndvars(L);

    // Change loop terminating condition to use the postinc iv when possible
    // and optimize loop terminating compare. FIXME: Move this after
    // StrengthReduceStridedIVUsers?
    OptimizeLoopTermCond(L);

    // FIXME: We can shrink overlarge IV's here. e.g. if the code has
    // computation in i64 values and the target doesn't support i64, demote
    // the computation to 32-bit if safe.

    // FIXME: Attempt to reuse values across multiple IV's. In particular, we
    // could have something like "for(i) { foo(i*8); bar(i*16) }", which should
    // be codegened as "for (j = 0;; j+=8) { foo(j); bar(j+j); }" on X86/PPC.
    // Need to be careful that IV's are all the same type. Only works for
    // intptr_t indvars.

    // IVsByStride keeps IVs for one particular loop.
    assert(IVsByStride.empty() && "Stale entries in IVsByStride?");

    // Note: this processes each stride/type pair individually. All users
    // passed into StrengthReduceStridedIVUsers have the same type AND stride.
    // Also, note that we iterate over IVUsesByStride indirectly by using
    // StrideOrder. This extra layer of indirection makes the ordering of
    // strides deterministic - not dependent on map order.
    for (unsigned Stride = 0, e = IU->StrideOrder.size();
         Stride != e; ++Stride) {
      std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
        IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
      assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
      // FIXME: Generalize to non-affine IV's.
      if (!SI->first->isLoopInvariant(L))
        continue;
      StrengthReduceStridedIVUsers(SI->first, *SI->second, L);
    }
  }

  // After all sharing is done, see if we can adjust the loop to test against
  // zero instead of counting up to a maximum. This is usually faster.
  OptimizeLoopCountIV(L);

  // We're done analyzing this loop; release all the state we built up for it.
  IVsByStride.clear();
  StrideNoReuse.clear();

  // Clean up after ourselves.
  if (!DeadInsts.empty())
    DeleteTriviallyDeadInstructions();

  // At this point, it is worth checking to see if any recurrence PHIs are also
  // dead, so that we can remove them as well.
  DeleteDeadPHIs(L->getHeader());

  return Changed;
}