1 //===- LoopStrengthReduce.cpp - Strength Reduce GEPs in Loops -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file was developed by Nate Begeman and is distributed under the
6 // University of Illinois Open Source License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass performs a strength reduction on array references inside loops that
11 // have as one or more of their components the loop induction variable.  This is
12 // accomplished by creating a new Value to hold the initial value of the array
13 // access for the first iteration, and then creating a new GEP instruction in
14 // the loop to increment the value by the appropriate amount.
15 //
16 //===----------------------------------------------------------------------===//
17 
18 #include "llvm/Transforms/Scalar.h"
19 #include "llvm/Constants.h"
20 #include "llvm/Instructions.h"
21 #include "llvm/Type.h"
22 #include "llvm/DerivedTypes.h"
23 #include "llvm/Analysis/Dominators.h"
24 #include "llvm/Analysis/LoopInfo.h"
25 #include "llvm/Analysis/ScalarEvolutionExpander.h"
26 #include "llvm/Support/CFG.h"
27 #include "llvm/Support/GetElementPtrTypeIterator.h"
28 #include "llvm/Transforms/Utils/Local.h"
29 #include "llvm/Target/TargetData.h"
30 #include "llvm/ADT/Statistic.h"
31 #include "llvm/Support/Debug.h"
32 #include <algorithm>
33 #include <set>
34 using namespace llvm;
35 
36 namespace {
  // Counts the number of GEP uses rewritten by this pass; printed by -stats.
  Statistic<> NumReduced ("loop-reduce", "Number of GEPs strength reduced");
38 
39   class GEPCache {
40   public:
41     GEPCache() : CachedPHINode(0), Map() {}
42 
43     GEPCache *get(Value *v) {
44       std::map<Value *, GEPCache>::iterator I = Map.find(v);
45       if (I == Map.end())
46         I = Map.insert(std::pair<Value *, GEPCache>(v, GEPCache())).first;
47       return &I->second;
48     }
49 
50     PHINode *CachedPHINode;
51     std::map<Value *, GEPCache> Map;
52   };
53 
54   struct IVUse {
55     /// Users - Keep track of all of the users of this stride as well as the
56     /// initial value.
57     std::vector<std::pair<SCEVHandle, Instruction*> > Users;
58     std::vector<Instruction *> UserOperands;
59 
60     void addUser(SCEVHandle &SH, Instruction *U, Instruction *V) {
61       Users.push_back(std::make_pair(SH, U));
62       UserOperands.push_back(V);
63     }
64   };
65 
66 
67   class LoopStrengthReduce : public FunctionPass {
68     LoopInfo *LI;
69     DominatorSet *DS;
70     ScalarEvolution *SE;
71     const TargetData *TD;
72     const Type *UIntPtrTy;
73     bool Changed;
74 
75     /// MaxTargetAMSize - This is the maximum power-of-two scale value that the
76     /// target can handle for free with its addressing modes.
77     unsigned MaxTargetAMSize;
78 
79     /// IVUsesByStride - Keep track of all uses of induction variables that we
80     /// are interested in.  The key of the map is the stride of the access.
81     std::map<Value*, IVUse> IVUsesByStride;
82 
83     /// CastedBasePointers - As we need to lower getelementptr instructions, we
84     /// cast the pointer input to uintptr_t.  This keeps track of the casted
85     /// values for the pointers we have processed so far.
86     std::map<Value*, Value*> CastedBasePointers;
87 
88     /// DeadInsts - Keep track of instructions we may have made dead, so that
89     /// we can remove them after we are done working.
90     std::set<Instruction*> DeadInsts;
91   public:
92     LoopStrengthReduce(unsigned MTAMS = 1)
93       : MaxTargetAMSize(MTAMS) {
94     }
95 
96     virtual bool runOnFunction(Function &) {
97       LI = &getAnalysis<LoopInfo>();
98       DS = &getAnalysis<DominatorSet>();
99       SE = &getAnalysis<ScalarEvolution>();
100       TD = &getAnalysis<TargetData>();
101       UIntPtrTy = TD->getIntPtrType();
102       Changed = false;
103 
104       for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
105         runOnLoop(*I);
106       return Changed;
107     }
108 
109     virtual void getAnalysisUsage(AnalysisUsage &AU) const {
110       AU.setPreservesCFG();
111       AU.addRequiredID(LoopSimplifyID);
112       AU.addRequired<LoopInfo>();
113       AU.addRequired<DominatorSet>();
114       AU.addRequired<TargetData>();
115       AU.addRequired<ScalarEvolution>();
116     }
117   private:
118     void runOnLoop(Loop *L);
119     bool AddUsersIfInteresting(Instruction *I, Loop *L);
120     void AnalyzeGetElementPtrUsers(GetElementPtrInst *GEP, Instruction *I,
121                                    Loop *L);
122 
123     void StrengthReduceStridedIVUsers(Value *Stride, IVUse &Uses, Loop *L,
124                                       bool isOnlyStride);
125 
126     void strengthReduceGEP(GetElementPtrInst *GEPI, Loop *L,
127                            GEPCache* GEPCache,
128                            Instruction *InsertBefore,
129                            std::set<Instruction*> &DeadInsts);
130     void DeleteTriviallyDeadInstructions(std::set<Instruction*> &Insts);
131   };
  // Register the pass so it is available as -loop-reduce on the command line.
  RegisterOpt<LoopStrengthReduce> X("loop-reduce",
                                    "Strength Reduce GEP Uses of Ind. Vars");
134 }
135 
136 FunctionPass *llvm::createLoopStrengthReducePass(unsigned MaxTargetAMSize) {
137   return new LoopStrengthReduce(MaxTargetAMSize);
138 }
139 
140 /// DeleteTriviallyDeadInstructions - If any of the instructions is the
141 /// specified set are trivially dead, delete them and see if this makes any of
142 /// their operands subsequently dead.
143 void LoopStrengthReduce::
144 DeleteTriviallyDeadInstructions(std::set<Instruction*> &Insts) {
145   while (!Insts.empty()) {
146     Instruction *I = *Insts.begin();
147     Insts.erase(Insts.begin());
148     if (isInstructionTriviallyDead(I)) {
149       for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
150         if (Instruction *U = dyn_cast<Instruction>(I->getOperand(i)))
151           Insts.insert(U);
152       I->getParent()->getInstList().erase(I);
153       Changed = true;
154     }
155   }
156 }
157 
158 
159 /// CanReduceSCEV - Return true if we can strength reduce this scalar evolution
160 /// in the specified loop.
161 static bool CanReduceSCEV(const SCEVHandle &SH, Loop *L) {
162   SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SH);
163   if (!AddRec || AddRec->getLoop() != L) return false;
164 
165   // FIXME: Generalize to non-affine IV's.
166   if (!AddRec->isAffine()) return false;
167 
168   // FIXME: generalize to IV's with more complex strides (must emit stride
169   // expression outside of loop!)
170   if (isa<SCEVConstant>(AddRec->getOperand(1)))
171     return true;
172 
173   // We handle steps by unsigned values, because we know we won't have to insert
174   // a cast for them.
175   if (SCEVUnknown *SU = dyn_cast<SCEVUnknown>(AddRec->getOperand(1)))
176     if (SU->getValue()->getType()->isUnsigned())
177       return true;
178 
179   // Otherwise, no, we can't handle it yet.
180   return false;
181 }
182 
183 
184 /// GetAdjustedIndex - Adjust the specified GEP sequential type index to match
185 /// the size of the pointer type, and scale it by the type size.
186 static SCEVHandle GetAdjustedIndex(const SCEVHandle &Idx, uint64_t TySize,
187                                    const Type *UIntPtrTy) {
188   SCEVHandle Result = Idx;
189   if (Result->getType()->getUnsignedVersion() != UIntPtrTy) {
190     if (UIntPtrTy->getPrimitiveSize() < Result->getType()->getPrimitiveSize())
191       Result = SCEVTruncateExpr::get(Result, UIntPtrTy);
192     else
193       Result = SCEVZeroExtendExpr::get(Result, UIntPtrTy);
194   }
195 
196   // This index is scaled by the type size being indexed.
197   if (TySize != 1)
198     Result = SCEVMulExpr::get(Result,
199                               SCEVConstant::get(ConstantUInt::get(UIntPtrTy,
200                                                                   TySize)));
201   return Result;
202 }
203 
204 /// AnalyzeGetElementPtrUsers - Analyze all of the users of the specified
205 /// getelementptr instruction, adding them to the IVUsesByStride table.  Note
206 /// that we only want to analyze a getelementptr instruction once, and it can
207 /// have multiple operands that are uses of the indvar (e.g. A[i][i]).  Because
208 /// of this, we only process a GEP instruction if its first recurrent operand is
209 /// "op", otherwise we will either have already processed it or we will sometime
210 /// later.
211 void LoopStrengthReduce::AnalyzeGetElementPtrUsers(GetElementPtrInst *GEP,
212                                                    Instruction *Op, Loop *L) {
213   // Analyze all of the subscripts of this getelementptr instruction, looking
214   // for uses that are determined by the trip count of L.  First, skip all
215   // operands the are not dependent on the IV.
216 
217   // Build up the base expression.  Insert an LLVM cast of the pointer to
218   // uintptr_t first.
219   Value *BasePtr;
220   if (Constant *CB = dyn_cast<Constant>(GEP->getOperand(0)))
221     BasePtr = ConstantExpr::getCast(CB, UIntPtrTy);
222   else {
223     Value *&BP = CastedBasePointers[GEP->getOperand(0)];
224     if (BP == 0) {
225       BasicBlock::iterator InsertPt;
226       if (isa<Argument>(GEP->getOperand(0))) {
227         InsertPt = GEP->getParent()->getParent()->begin()->begin();
228       } else {
229         InsertPt = cast<Instruction>(GEP->getOperand(0));
230         if (InvokeInst *II = dyn_cast<InvokeInst>(GEP->getOperand(0)))
231           InsertPt = II->getNormalDest()->begin();
232         else
233           ++InsertPt;
234       }
235 
236       // Do not insert casts into the middle of PHI node blocks.
237       while (isa<PHINode>(InsertPt)) ++InsertPt;
238 
239       BP = new CastInst(GEP->getOperand(0), UIntPtrTy,
240                         GEP->getOperand(0)->getName(), InsertPt);
241     }
242     BasePtr = BP;
243   }
244 
245   SCEVHandle Base = SCEVUnknown::get(BasePtr);
246 
247   gep_type_iterator GTI = gep_type_begin(GEP);
248   unsigned i = 1;
249   for (; GEP->getOperand(i) != Op; ++i, ++GTI) {
250     // If this is a use of a recurrence that we can analyze, and it comes before
251     // Op does in the GEP operand list, we will handle this when we process this
252     // operand.
253     if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
254       const StructLayout *SL = TD->getStructLayout(STy);
255       unsigned Idx = cast<ConstantUInt>(GEP->getOperand(i))->getValue();
256       uint64_t Offset = SL->MemberOffsets[Idx];
257       Base = SCEVAddExpr::get(Base, SCEVUnknown::getIntegerSCEV(Offset,
258                                                                 UIntPtrTy));
259     } else {
260       SCEVHandle Idx = SE->getSCEV(GEP->getOperand(i));
261 
262       // If this operand is reducible, and it's not the one we are looking at
263       // currently, do not process the GEP at this time.
264       if (CanReduceSCEV(Idx, L))
265         return;
266       Base = SCEVAddExpr::get(Base, GetAdjustedIndex(Idx,
267                              TD->getTypeSize(GTI.getIndexedType()), UIntPtrTy));
268     }
269   }
270 
271   // Get the index, convert it to intptr_t.
272   SCEVHandle GEPIndexExpr =
273     GetAdjustedIndex(SE->getSCEV(Op), TD->getTypeSize(GTI.getIndexedType()),
274                      UIntPtrTy);
275 
276   // Process all remaining subscripts in the GEP instruction.
277   for (++i, ++GTI; i != GEP->getNumOperands(); ++i, ++GTI)
278     if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
279       const StructLayout *SL = TD->getStructLayout(STy);
280       unsigned Idx = cast<ConstantUInt>(GEP->getOperand(i))->getValue();
281       uint64_t Offset = SL->MemberOffsets[Idx];
282       Base = SCEVAddExpr::get(Base, SCEVUnknown::getIntegerSCEV(Offset,
283                                                                 UIntPtrTy));
284     } else {
285       SCEVHandle Idx = SE->getSCEV(GEP->getOperand(i));
286       if (CanReduceSCEV(Idx, L)) {   // Another IV subscript
287         GEPIndexExpr = SCEVAddExpr::get(GEPIndexExpr,
288                     GetAdjustedIndex(Idx, TD->getTypeSize(GTI.getIndexedType()),
289                                    UIntPtrTy));
290         assert(CanReduceSCEV(GEPIndexExpr, L) &&
291                "Cannot reduce the sum of two reducible SCEV's??");
292       } else {
293         Base = SCEVAddExpr::get(Base, GetAdjustedIndex(Idx,
294                              TD->getTypeSize(GTI.getIndexedType()), UIntPtrTy));
295       }
296     }
297 
298   assert(CanReduceSCEV(GEPIndexExpr, L) && "Non reducible idx??");
299 
300   // FIXME: If the base is not loop invariant, we currently cannot emit this.
301   if (!Base->isLoopInvariant(L)) {
302     DEBUG(std::cerr << "IGNORING GEP due to non-invaiant base: "
303                     << *Base << "\n");
304     return;
305   }
306 
307   Base = SCEVAddExpr::get(Base, cast<SCEVAddRecExpr>(GEPIndexExpr)->getStart());
308   SCEVHandle Stride = cast<SCEVAddRecExpr>(GEPIndexExpr)->getOperand(1);
309 
310   DEBUG(std::cerr << "GEP BASE  : " << *Base << "\n");
311   DEBUG(std::cerr << "GEP STRIDE: " << *Stride << "\n");
312 
313   Value *Step = 0;   // Step of ISE.
314   if (SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride))
315     /// Always get the step value as an unsigned value.
316     Step = ConstantExpr::getCast(SC->getValue(),
317                                SC->getValue()->getType()->getUnsignedVersion());
318   else
319     Step = cast<SCEVUnknown>(Stride)->getValue();
320   assert(Step->getType()->isUnsigned() && "Bad step value!");
321 
322 
323   // Now that we know the base and stride contributed by the GEP instruction,
324   // process all users.
325   for (Value::use_iterator UI = GEP->use_begin(), E = GEP->use_end();
326        UI != E; ++UI) {
327     Instruction *User = cast<Instruction>(*UI);
328 
329     // Do not infinitely recurse on PHI nodes.
330     if (isa<PHINode>(User) && User->getParent() == L->getHeader())
331       continue;
332 
333     // If this is an instruction defined in a nested loop, or outside this loop,
334     // don't mess with it.
335     if (LI->getLoopFor(User->getParent()) != L)
336       continue;
337 
338     DEBUG(std::cerr << "FOUND USER: " << *User
339           << "   OF STRIDE: " << *Step << " BASE = " << *Base << "\n");
340 
341 
342     // Okay, we found a user that we cannot reduce.  Analyze the instruction
343     // and decide what to do with it.
344     IVUsesByStride[Step].addUser(Base, User, GEP);
345   }
346 }
347 
348 /// AddUsersIfInteresting - Inspect the specified instruction.  If it is a
349 /// reducible SCEV, recursively add its users to the IVUsesByStride set and
350 /// return true.  Otherwise, return false.
bool LoopStrengthReduce::AddUsersIfInteresting(Instruction *I, Loop *L) {
  // Void-producing instructions cannot be recurrences.
  if (I->getType() == Type::VoidTy) return false;
  SCEVHandle ISE = SE->getSCEV(I);
  if (!CanReduceSCEV(ISE, L)) return false;

  // CanReduceSCEV guarantees ISE is an affine add-recurrence over L.
  SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(ISE);
  SCEVHandle Start = AR->getStart();

  // Get the step value, canonicalizing to an unsigned integer type so that
  // lookups in the map will match.
  Value *Step = 0;   // Step of ISE.
  if (SCEVConstant *SC = dyn_cast<SCEVConstant>(AR->getOperand(1)))
    /// Always get the step value as an unsigned value.
    Step = ConstantExpr::getCast(SC->getValue(),
                               SC->getValue()->getType()->getUnsignedVersion());
  else
    Step = cast<SCEVUnknown>(AR->getOperand(1))->getValue();
  assert(Step->getType()->isUnsigned() && "Bad step value!");

  // Track GEPs already handed to AnalyzeGetElementPtrUsers so a GEP that uses
  // I in several operand positions is only analyzed once.
  std::set<GetElementPtrInst*> AnalyzedGEPs;

  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;++UI){
    Instruction *User = cast<Instruction>(*UI);

    // Do not infinitely recurse on PHI nodes.
    if (isa<PHINode>(User) && User->getParent() == L->getHeader())
      continue;

    // If this is an instruction defined in a nested loop, or outside this loop,
    // don't mess with it.
    if (LI->getLoopFor(User->getParent()) != L)
      continue;

    // Next, see if this user is analyzable itself!
    if (!AddUsersIfInteresting(User, L)) {
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
        // If this is a getelementptr instruction, figure out what linear
        // expression of induction variable is actually being used.
        //
        if (AnalyzedGEPs.insert(GEP).second)   // Not already analyzed?
          AnalyzeGetElementPtrUsers(GEP, I, L);
      } else {
        DEBUG(std::cerr << "FOUND USER: " << *User
              << "   OF SCEV: " << *ISE << "\n");

        // Okay, we found a user that we cannot reduce.  Analyze the instruction
        // and decide what to do with it.
        IVUsesByStride[Step].addUser(Start, User, I);
      }
    }
  }
  return true;
}
404 
405 namespace {
406   /// BasedUser - For a particular base value, keep information about how we've
407   /// partitioned the expression so far.
408   struct BasedUser {
409     /// Inst - The instruction using the induction variable.
410     Instruction *Inst;
411 
412     /// Op - The value to replace with the EmittedBase.
413     Value *Op;
414 
415     /// Imm - The immediate value that should be added to the base immediately
416     /// before Inst, because it will be folded into the imm field of the
417     /// instruction.
418     SCEVHandle Imm;
419 
420     /// EmittedBase - The actual value* to use for the base value of this
421     /// operation.  This is null if we should just use zero so far.
422     Value *EmittedBase;
423 
424     BasedUser(Instruction *I, Value *V, const SCEVHandle &IMM)
425       : Inst(I), Op(V), Imm(IMM), EmittedBase(0) {}
426 
427 
428     // No need to compare these.
429     bool operator<(const BasedUser &BU) const { return 0; }
430 
431     void dump() const;
432   };
433 }
434 
435 void BasedUser::dump() const {
436   std::cerr << " Imm=" << *Imm;
437   if (EmittedBase)
438     std::cerr << "  EB=" << *EmittedBase;
439 
440   std::cerr << "   Inst: " << *Inst;
441 }
442 
443 /// isTargetConstant - Return true if the following can be referenced by the
444 /// immediate field of a target instruction.
static bool isTargetConstant(const SCEVHandle &V) {

  // FIXME: Look at the target to decide if &GV is a legal constant immediate.
  if (isa<SCEVConstant>(V)) return true;

  // NOTE: the early return below deliberately disables the global-address
  // path that follows; the dead code is kept so it can be re-enabled for
  // targets (like x86) whose addressing modes accept &GV immediates.
  return false;     // ENABLE this for x86

  if (SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(SU->getValue()))
      if (CE->getOpcode() == Instruction::Cast)
        if (isa<GlobalValue>(CE->getOperand(0)))
          // FIXME: should check to see that the dest is uintptr_t!
          return true;
  return false;
}
460 
461 /// GetImmediateValues - Look at Val, and pull out any additions of constants
462 /// that can fit into the immediate field of instructions in the target.
463 static SCEVHandle GetImmediateValues(SCEVHandle Val, bool isAddress) {
464   if (!isAddress)
465     return SCEVUnknown::getIntegerSCEV(0, Val->getType());
466   if (isTargetConstant(Val))
467     return Val;
468 
469   SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val);
470   if (SAE) {
471     unsigned i = 0;
472     for (; i != SAE->getNumOperands(); ++i)
473       if (isTargetConstant(SAE->getOperand(i))) {
474         SCEVHandle ImmVal = SAE->getOperand(i);
475 
476         // If there are any other immediates that we can handle here, pull them
477         // out too.
478         for (++i; i != SAE->getNumOperands(); ++i)
479           if (isTargetConstant(SAE->getOperand(i)))
480             ImmVal = SCEVAddExpr::get(ImmVal, SAE->getOperand(i));
481         return ImmVal;
482       }
483   }
484 
485   return SCEVUnknown::getIntegerSCEV(0, Val->getType());
486 }
487 
488 /// StrengthReduceStridedIVUsers - Strength reduce all of the users of a single
489 /// stride of IV.  All of the users may have different starting values, and this
490 /// may not be the only stride (we know it is if isOnlyStride is true).
void LoopStrengthReduce::StrengthReduceStridedIVUsers(Value *Stride,
                                                      IVUse &Uses, Loop *L,
                                                      bool isOnlyStride) {
  // Transform our list of users and offsets to a bit more complex table.  In
  // this new vector, the first entry for each element is the base of the
  // strided access, and the second is the BasedUser object for the use.  We
  // progressively move information from the first to the second entry, until we
  // eventually emit the object.
  std::vector<std::pair<SCEVHandle, BasedUser> > UsersToProcess;
  UsersToProcess.reserve(Uses.Users.size());

  // A zero-valued SCEV of the stride's type; used as the initial (empty) Imm.
  SCEVHandle ZeroBase = SCEVUnknown::getIntegerSCEV(0,
                                              Uses.Users[0].first->getType());

  for (unsigned i = 0, e = Uses.Users.size(); i != e; ++i)
    UsersToProcess.push_back(std::make_pair(Uses.Users[i].first,
                                            BasedUser(Uses.Users[i].second,
                                                      Uses.UserOperands[i],
                                                      ZeroBase)));

  // First pass, figure out what we can represent in the immediate fields of
  // instructions.  If we can represent anything there, move it to the imm
  // fields of the BasedUsers.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // Only loads and stores can fold an offset into their addressing mode.
    bool isAddress = isa<LoadInst>(UsersToProcess[i].second.Inst) ||
                     isa<StoreInst>(UsersToProcess[i].second.Inst);
    UsersToProcess[i].second.Imm = GetImmediateValues(UsersToProcess[i].first,
                                                      isAddress);
    // Subtract the immediate portion from the base so base + imm is preserved.
    UsersToProcess[i].first = SCEV::getMinusSCEV(UsersToProcess[i].first,
                                                 UsersToProcess[i].second.Imm);

    DEBUG(std::cerr << "BASE: " << *UsersToProcess[i].first);
    DEBUG(UsersToProcess[i].second.dump());
  }

  SCEVExpander Rewriter(*SE, *LI);
  BasicBlock  *Preheader = L->getLoopPreheader();
  Instruction *PreInsertPt = Preheader->getTerminator();
  Instruction *PhiInsertBefore = L->getHeader()->begin();

  assert(isa<PHINode>(PhiInsertBefore) &&
         "How could this loop have IV's without any phis?");
  PHINode *SomeLoopPHI = cast<PHINode>(PhiInsertBefore);
  assert(SomeLoopPHI->getNumIncomingValues() == 2 &&
         "This loop isn't canonicalized right");
  // The latch is whichever of the two predecessors is not the preheader.
  BasicBlock *LatchBlock =
   SomeLoopPHI->getIncomingBlock(SomeLoopPHI->getIncomingBlock(0) == Preheader);

  // FIXME: This loop needs increasing levels of intelligence.
  // STAGE 0: just emit everything as its own base.  <-- We are here
  // STAGE 1: factor out common vars from bases, and try and push resulting
  //          constants into Imm field.
  // STAGE 2: factor out large constants to try and make more constants
  //          acceptable for target loads and stores.
  std::sort(UsersToProcess.begin(), UsersToProcess.end());

  while (!UsersToProcess.empty()) {
    // Create a new Phi for this base, and stick it in the loop header.
    Value *Replaced = UsersToProcess.front().second.Op;
    const Type *ReplacedTy = Replaced->getType();
    PHINode *NewPHI = new PHINode(ReplacedTy, Replaced->getName()+".str",
                                  PhiInsertBefore);

    // Emit the initial base value into the loop preheader, and add it to the
    // Phi node.
    Value *BaseV = Rewriter.expandCodeFor(UsersToProcess.front().first,
                                          PreInsertPt, ReplacedTy);
    NewPHI->addIncoming(BaseV, Preheader);

    // Emit the increment of the base value before the terminator of the loop
    // latch block, and add it to the Phi node.
    SCEVHandle Inc = SCEVAddExpr::get(SCEVUnknown::get(NewPHI),
                                      SCEVUnknown::get(Stride));

    Value *IncV = Rewriter.expandCodeFor(Inc, LatchBlock->getTerminator(),
                                         ReplacedTy);
    IncV->setName(NewPHI->getName()+".inc");
    NewPHI->addIncoming(IncV, LatchBlock);

    // Emit the code to add the immediate offset to the Phi value, just before
    // the instruction that we identified as using this stride and base.
    // First, empty the SCEVExpander's expression map  so that we are guaranteed
    // to have the code emitted where we expect it.
    Rewriter.clear();
    SCEVHandle NewValSCEV = SCEVAddExpr::get(SCEVUnknown::get(NewPHI),
                                             UsersToProcess.front().second.Imm);
    Value *newVal = Rewriter.expandCodeFor(NewValSCEV,
                                           UsersToProcess.front().second.Inst,
                                           ReplacedTy);

    // Replace the use of the operand Value with the new Phi we just created.
    DEBUG(std::cerr << "REPLACING: " << *Replaced << "IN: " <<
          *UsersToProcess.front().second.Inst << "WITH: "<< *newVal << '\n');
    UsersToProcess.front().second.Inst->replaceUsesOfWith(Replaced, newVal);

    // Mark old value we replaced as possibly dead, so that it is eliminated
    // if we just replaced the last use of that value.
    DeadInsts.insert(cast<Instruction>(Replaced));

    UsersToProcess.erase(UsersToProcess.begin());
    ++NumReduced;

    // TODO: Next, find out which base index is the most common, pull it out.
  }

  // IMPORTANT TODO: Figure out how to partition the IV's with this stride, but
  // different starting values, into different PHIs.

  // BEFORE writing this, it's probably useful to handle GEP's.

  // NOTE: pull all constants together, for REG+IMM addressing, include &GV in
  // 'IMM' if the target supports it.
}
604 
605 
void LoopStrengthReduce::runOnLoop(Loop *L) {
  // First step, transform all loops nesting inside of this loop.
  for (LoopInfo::iterator I = L->begin(), E = L->end(); I != E; ++I)
    runOnLoop(*I);

  // Next, find all uses of induction variables in this loop, and categorize
  // them by stride.  Start by finding all of the PHI nodes in the header for
  // this loop.  If they are induction variables, inspect their uses.
  for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I)
    AddUsersIfInteresting(I, L);

  // If we have nothing to do, return.
  //if (IVUsesByStride.empty()) return;

  // FIXME: We can widen subreg IV's here for RISC targets.  e.g. instead of
  // doing computation in byte values, promote to 32-bit values if safe.

  // FIXME: Attempt to reuse values across multiple IV's.  In particular, we
  // could have something like "for(i) { foo(i*8); bar(i*16) }", which should be
  // codegened as "for (j = 0;; j+=8) { foo(j); bar(j+j); }" on X86/PPC.  Need
  // to be careful that IV's are all the same type.  Only works for intptr_t
  // indvars.

  // If we only have one stride, we can more aggressively eliminate some things.
  bool HasOneStride = IVUsesByStride.size() == 1;

  for (std::map<Value*, IVUse>::iterator SI = IVUsesByStride.begin(),
         E = IVUsesByStride.end(); SI != E; ++SI)
    StrengthReduceStridedIVUsers(SI->first, SI->second, L, HasOneStride);

  // Clean up after ourselves
  if (!DeadInsts.empty()) {
    DeleteTriviallyDeadInstructions(DeadInsts);

    BasicBlock::iterator I = L->getHeader()->begin();
    PHINode *PN;
    while ((PN = dyn_cast<PHINode>(I))) {
      ++I;  // Preincrement iterator to avoid invalidating it when deleting PN.

      // At this point, we know that we have killed one or more GEP instructions.
      // It is worth checking to see if the cann indvar is also dead, so that we
      // can remove it as well.  The requirements for the cann indvar to be
      // considered dead are:
      // 1. the cann indvar has one use
      // 2. the use is an add instruction
      // 3. the add has one use
      // 4. the add is used by the cann indvar
      // If all four cases above are true, then we can remove both the add and
      // the cann indvar.
      // FIXME: this needs to eliminate an induction variable even if it's being
      // compared against some value to decide loop termination.
      if (PN->hasOneUse()) {
        BinaryOperator *BO = dyn_cast<BinaryOperator>(*(PN->use_begin()));
        if (BO && BO->hasOneUse()) {
          if (PN == *(BO->use_begin())) {
            DeadInsts.insert(BO);
            // Break the cycle, then delete the PHI.
            PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
            PN->eraseFromParent();
          }
        }
      }
    }
    // Sweep again to pick up the add (and anything it kept alive).
    DeleteTriviallyDeadInstructions(DeadInsts);
  }

  // Reset per-loop state so nothing leaks into the next loop we process.
  IVUsesByStride.clear();
  CastedBasePointers.clear();
  return;
}
676