//===----------------- LoopRotationUtils.cpp -----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides utilities to convert a loop into a loop with bottom test.
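//
// Illustratively (a schematic sketch, not a specific test case), rotation
// turns a top-tested loop
//
//   while (cond) { body; }
//
// into a bottom-tested form guarded by a copy of the test,
//
//   if (cond) { do { body; } while (cond); }
//
// by cloning the loop header into the preheader so that the clone acts as
// the guard and the rotated loop exits from its latch.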
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/LoopRotationUtils.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DomTreeUpdater.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;

#define DEBUG_TYPE "loop-rotate"

STATISTIC(NumRotated, "Number of loops rotated");

namespace {
/// A simple loop rotation transformation.
class LoopRotate {
  const unsigned MaxHeaderSize;
  LoopInfo *LI;
  const TargetTransformInfo *TTI;
  AssumptionCache *AC;
  DominatorTree *DT;
  ScalarEvolution *SE;
  MemorySSAUpdater *MSSAU;
  const SimplifyQuery &SQ;
  bool RotationOnly;
  bool IsUtilMode;

public:
  LoopRotate(unsigned MaxHeaderSize, LoopInfo *LI,
             const TargetTransformInfo *TTI, AssumptionCache *AC,
             DominatorTree *DT, ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
             const SimplifyQuery &SQ, bool RotationOnly, bool IsUtilMode)
      : MaxHeaderSize(MaxHeaderSize), LI(LI), TTI(TTI), AC(AC), DT(DT), SE(SE),
        MSSAU(MSSAU), SQ(SQ), RotationOnly(RotationOnly),
        IsUtilMode(IsUtilMode) {}
  bool processLoop(Loop *L);

private:
  bool rotateLoop(Loop *L, bool SimplifiedLatch);
  bool simplifyLoopLatch(Loop *L);
};
} // end anonymous namespace

/// RewriteUsesOfClonedInstructions - We just cloned the instructions from the
/// old header into the preheader.  If there were uses of the values produced
/// by these instructions that were outside of the loop, we have to insert PHI
/// nodes to merge the two values.  Do this now.
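///
/// For example (schematically): if a value %v is defined in OrigHeader and
/// used in an exit block, then after cloning there are two reaching
/// definitions -- %v in OrigHeader and its clone in OrigPreheader -- and the
/// out-of-loop use is rewritten to a new PHI node that merges them.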
static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader,
                                            BasicBlock *OrigPreheader,
                                            ValueToValueMapTy &ValueMap,
                                SmallVectorImpl<PHINode*> *InsertedPHIs) {
  // Remove PHI node entries that are no longer live.
  BasicBlock::iterator I, E = OrigHeader->end();
  for (I = OrigHeader->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I)
    PN->removeIncomingValue(PN->getBasicBlockIndex(OrigPreheader));

  // Now fix up users of the instructions in OrigHeader, inserting PHI nodes
  // as necessary.
  SSAUpdater SSA(InsertedPHIs);
  for (I = OrigHeader->begin(); I != E; ++I) {
    Value *OrigHeaderVal = &*I;

    // If there are no uses of the value (e.g. because it returns void), there
    // is nothing to rewrite.
    if (OrigHeaderVal->use_empty())
      continue;

    Value *OrigPreHeaderVal = ValueMap.lookup(OrigHeaderVal);

    // The value now exists in two versions: the initial value in the preheader
    // and the loop "next" value in the original header.
    SSA.Initialize(OrigHeaderVal->getType(), OrigHeaderVal->getName());
    SSA.AddAvailableValue(OrigHeader, OrigHeaderVal);
    SSA.AddAvailableValue(OrigPreheader, OrigPreHeaderVal);

    // Visit each use of the OrigHeader instruction.
    for (Value::use_iterator UI = OrigHeaderVal->use_begin(),
                             UE = OrigHeaderVal->use_end();
         UI != UE;) {
      // Grab the use before incrementing the iterator.
      Use &U = *UI;

      // Increment the iterator before removing the use from the list.
      ++UI;

      // SSAUpdater can't handle a non-PHI use in the same block as an
      // earlier def. We can easily handle those cases manually.
      Instruction *UserInst = cast<Instruction>(U.getUser());
      if (!isa<PHINode>(UserInst)) {
        BasicBlock *UserBB = UserInst->getParent();

        // The original users in the OrigHeader are already using the
        // original definitions.
        if (UserBB == OrigHeader)
          continue;

        // Users in the OrigPreHeader need to use the value to which the
        // original definitions are mapped.
        if (UserBB == OrigPreheader) {
          U = OrigPreHeaderVal;
          continue;
        }
      }

      // Anything else can be handled by SSAUpdater.
      SSA.RewriteUse(U);
    }

    // Replace MetadataAsValue(ValueAsMetadata(OrigHeaderVal)) uses in debug
    // intrinsics.
    SmallVector<DbgValueInst *, 1> DbgValues;
    llvm::findDbgValues(DbgValues, OrigHeaderVal);
    for (auto &DbgValue : DbgValues) {
      // The original users in the OrigHeader are already using the original
      // definitions.
      BasicBlock *UserBB = DbgValue->getParent();
      if (UserBB == OrigHeader)
        continue;

      // Users in the OrigPreHeader need to use the value to which the
      // original definitions are mapped and anything else can be handled by
      // the SSAUpdater. To avoid adding PHINodes, check if the value is
      // available in UserBB, if not substitute undef.
      Value *NewVal;
      if (UserBB == OrigPreheader)
        NewVal = OrigPreHeaderVal;
      else if (SSA.HasValueForBlock(UserBB))
        NewVal = SSA.GetValueInMiddleOfBlock(UserBB);
      else
        NewVal = UndefValue::get(OrigHeaderVal->getType());
      DbgValue->setOperand(0,
                           MetadataAsValue::get(OrigHeaderVal->getContext(),
                                                ValueAsMetadata::get(NewVal)));
    }
  }
}

// Look for a phi which is only used outside the loop (via a LCSSA phi)
// in the exit from the header. This means that rotating the loop can
// remove the phi.
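// For instance (schematic): a header phi whose only user is an LCSSA phi in
// the header's exit block contributes nothing inside the loop, so once the
// loop exits from the latch instead, the header phi becomes removable.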
static bool shouldRotateLoopExitingLatch(Loop *L) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *HeaderExit = Header->getTerminator()->getSuccessor(0);
  if (L->contains(HeaderExit))
    HeaderExit = Header->getTerminator()->getSuccessor(1);

  for (auto &Phi : Header->phis()) {
    // Look for uses of this phi in the loop/via exits other than the header.
    if (llvm::any_of(Phi.users(), [HeaderExit](const User *U) {
          return cast<Instruction>(U)->getParent() != HeaderExit;
        }))
      continue;
    return true;
  }

  return false;
}

/// Rotate loop L. Return true if the loop is rotated.
///
/// \param SimplifiedLatch is true if the latch was just folded into the final
/// loop exit. In this case we may want to rotate even though the new latch is
/// now an exiting branch. This rotation would have happened had the latch not
/// been simplified. However, if SimplifiedLatch is false, then we avoid
/// rotating loops in which the latch exits to avoid excessive or endless
/// rotation. LoopRotate should be repeatable and converge to a canonical
/// form. This property is satisfied because simplifying the loop latch can
/// only happen once across multiple invocations of the LoopRotate pass.
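///
/// At a high level (a summary of the steps below): the header's instructions
/// are hoisted or cloned into the preheader, PHI nodes and SSA uses are
/// rewritten, the header's in-loop successor becomes the new header, the
/// DomTree and MemorySSA are updated, and finally the old header is merged
/// into the latch when the two are connected by an unconditional branch.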
bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
  // If the loop has only one block then there is not much to rotate.
  if (L->getBlocks().size() == 1)
    return false;

  BasicBlock *OrigHeader = L->getHeader();
  BasicBlock *OrigLatch = L->getLoopLatch();

  BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator());
  if (!BI || BI->isUnconditional())
    return false;

  // If the loop header is not one of the loop exiting blocks then
  // either this loop is already rotated or it is not
  // suitable for loop rotation transformations.
  if (!L->isLoopExiting(OrigHeader))
    return false;

  // If the loop latch already contains a branch that leaves the loop then the
  // loop is already rotated.
  if (!OrigLatch)
    return false;

  // Rotate if either the loop latch does *not* exit the loop, or if the loop
  // latch was just simplified. Or if we think it will be profitable.
  if (L->isLoopExiting(OrigLatch) && !SimplifiedLatch && IsUtilMode == false &&
      !shouldRotateLoopExitingLatch(L))
    return false;

  // Check size of original header and reject loop if it is very big or we
  // can't duplicate blocks inside it.
  {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, AC, EphValues);

    CodeMetrics Metrics;
    Metrics.analyzeBasicBlock(OrigHeader, *TTI, EphValues);
    if (Metrics.notDuplicatable) {
      LLVM_DEBUG(
          dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable"
                 << " instructions: ";
          L->dump());
      return false;
    }
    if (Metrics.convergent) {
      LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains convergent "
                           "instructions: ";
                 L->dump());
      return false;
    }
    if (Metrics.NumInsts > MaxHeaderSize)
      return false;
  }

  // Now, this loop is suitable for rotation.
  BasicBlock *OrigPreheader = L->getLoopPreheader();

  // If the loop could not be converted to canonical form, it must have an
  // indirectbr in it, just give up.
  if (!OrigPreheader || !L->hasDedicatedExits())
    return false;

  // Anything ScalarEvolution may know about this loop or the PHI nodes
  // in its header will soon be invalidated. We should also invalidate
  // all outer loops because insertion and deletion of blocks that happens
  // during the rotation may violate invariants related to backedge taken
  // infos in them.
  if (SE)
    SE->forgetTopmostLoop(L);

  LLVM_DEBUG(dbgs() << "LoopRotation: rotating "; L->dump());
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Find the new loop header. NewHeader is the Header's one and only successor
  // that is inside the loop.  The Header's other successor is outside the
  // loop.  Otherwise the loop is not suitable for rotation.
  BasicBlock *Exit = BI->getSuccessor(0);
  BasicBlock *NewHeader = BI->getSuccessor(1);
  if (L->contains(Exit))
    std::swap(Exit, NewHeader);
  assert(NewHeader && "Unable to determine new loop header");
  assert(L->contains(NewHeader) && !L->contains(Exit) &&
         "Unable to determine loop header and exit blocks");

  // This code assumes that the new header has exactly one predecessor.
  // Remove any single-entry PHI nodes in it.
  assert(NewHeader->getSinglePredecessor() &&
         "New header doesn't have one pred!");
  FoldSingleEntryPHINodes(NewHeader);

  // Begin by walking OrigHeader and populating ValueMap with an entry for
  // each Instruction.
  BasicBlock::iterator I = OrigHeader->begin(), E = OrigHeader->end();
  ValueToValueMapTy ValueMap;

  // For PHI nodes, the value available in OldPreHeader is just the
  // incoming value from OldPreHeader.
  for (; PHINode *PN = dyn_cast<PHINode>(I); ++I)
    ValueMap[PN] = PN->getIncomingValueForBlock(OrigPreheader);

  // For the rest of the instructions, either hoist to the OrigPreheader if
  // possible or create a clone in the OldPreHeader if not.
  Instruction *LoopEntryBranch = OrigPreheader->getTerminator();

  // Record all debug intrinsics preceding LoopEntryBranch to avoid duplication.
  using DbgIntrinsicHash =
      std::pair<std::pair<Value *, DILocalVariable *>, DIExpression *>;
  auto makeHash = [](DbgVariableIntrinsic *D) -> DbgIntrinsicHash {
    return {{D->getVariableLocation(), D->getVariable()}, D->getExpression()};
  };
  SmallDenseSet<DbgIntrinsicHash, 8> DbgIntrinsics;
  for (auto I = std::next(OrigPreheader->rbegin()), E = OrigPreheader->rend();
       I != E; ++I) {
    if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&*I))
      DbgIntrinsics.insert(makeHash(DII));
    else
      break;
  }

  while (I != E) {
    Instruction *Inst = &*I++;

    // If the instruction's operands are invariant and it doesn't read or write
    // memory, then it is safe to hoist.  Doing this doesn't change the order of
    // execution in the preheader, but does prevent the instruction from
    // executing in each iteration of the loop.  This means it is safe to hoist
    // something that might trap, but isn't safe to hoist something that reads
    // memory (without proving that the loop doesn't write).
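    // (For example, a udiv whose operands are loop invariant may trap, yet it
    // can still be moved here, whereas a load of a loop-invariant pointer
    // cannot, since the loop body might store to that memory.)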
    if (L->hasLoopInvariantOperands(Inst) && !Inst->mayReadFromMemory() &&
        !Inst->mayWriteToMemory() && !Inst->isTerminator() &&
        !isa<DbgInfoIntrinsic>(Inst) && !isa<AllocaInst>(Inst)) {
      Inst->moveBefore(LoopEntryBranch);
      continue;
    }

    // Otherwise, create a duplicate of the instruction.
    Instruction *C = Inst->clone();

    // Eagerly remap the operands of the instruction.
    RemapInstruction(C, ValueMap,
                     RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);

    // Avoid inserting the same intrinsic twice.
    if (auto *DII = dyn_cast<DbgVariableIntrinsic>(C))
      if (DbgIntrinsics.count(makeHash(DII))) {
        C->deleteValue();
        continue;
      }

    // With the operands remapped, see if the instruction constant folds or is
    // otherwise simplifiable.  This commonly occurs because the entry from PHI
    // nodes allows icmps and other instructions to fold.
    Value *V = SimplifyInstruction(C, SQ);
    if (V && LI->replacementPreservesLCSSAForm(C, V)) {
      // If so, then delete the temporary instruction and stick the folded value
      // in the map.
      ValueMap[Inst] = V;
      if (!C->mayHaveSideEffects()) {
        C->deleteValue();
        C = nullptr;
      }
    } else {
      ValueMap[Inst] = C;
    }
    if (C) {
      // Otherwise, stick the new instruction into the new block!
      C->setName(Inst->getName());
      C->insertBefore(LoopEntryBranch);

      if (auto *II = dyn_cast<IntrinsicInst>(C))
        if (II->getIntrinsicID() == Intrinsic::assume)
          AC->registerAssumption(II);
    }
  }

  // Along with all the other instructions, we just cloned OrigHeader's
  // terminator into OrigPreHeader. Fix up the PHI nodes in each of OrigHeader's
  // successors by duplicating their incoming values for OrigHeader.
  for (BasicBlock *SuccBB : successors(OrigHeader))
    for (BasicBlock::iterator BI = SuccBB->begin();
         PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
      PN->addIncoming(PN->getIncomingValueForBlock(OrigHeader), OrigPreheader);

  // Now that OrigPreHeader has a clone of OrigHeader's terminator, remove
  // OrigPreHeader's old terminator (the original branch into the loop), and
  // remove the corresponding incoming values from the PHI nodes in OrigHeader.
  LoopEntryBranch->eraseFromParent();

  // Update MemorySSA before the rewrite call below changes the 1:1
  // instruction:cloned_instruction_or_value mapping in ValueMap.
  if (MSSAU) {
    ValueMap[OrigHeader] = OrigPreheader;
    MSSAU->updateForClonedBlockIntoPred(OrigHeader, OrigPreheader, ValueMap);
  }

  SmallVector<PHINode*, 2> InsertedPHIs;
  // If there were any uses of instructions in the duplicated block outside the
  // loop, update them, inserting PHI nodes as required.
  RewriteUsesOfClonedInstructions(OrigHeader, OrigPreheader, ValueMap,
                                  &InsertedPHIs);

  // Attach dbg.value intrinsics to the new phis if that phi uses a value that
  // previously had debug metadata attached. This keeps the debug info
  // up-to-date in the loop body.
  if (!InsertedPHIs.empty())
    insertDebugValuesForPHIs(OrigHeader, InsertedPHIs);

  // NewHeader is now the header of the loop.
  L->moveToHeader(NewHeader);
  assert(L->getHeader() == NewHeader && "Latch block is our new header");

  // Inform DT about changes to the CFG.
  if (DT) {
    // The OrigPreheader now branches to the NewHeader and Exit, so inform the
    // DT about the edge to OrigHeader that got removed.
    SmallVector<DominatorTree::UpdateType, 3> Updates;
    Updates.push_back({DominatorTree::Insert, OrigPreheader, Exit});
    Updates.push_back({DominatorTree::Insert, OrigPreheader, NewHeader});
    Updates.push_back({DominatorTree::Delete, OrigPreheader, OrigHeader});
    DT->applyUpdates(Updates);

    if (MSSAU) {
      MSSAU->applyUpdates(Updates, *DT);
      if (VerifyMemorySSA)
        MSSAU->getMemorySSA()->verifyMemorySSA();
    }
  }

  // At this point, we've finished our major CFG changes.  As part of cloning
  // the loop into the preheader we've simplified instructions and the
  // duplicated conditional branch may now be branching on a constant.  If it is
  // branching on a constant and if that constant means that we enter the loop,
  // then we fold away the cond branch to an uncond branch.  This simplifies the
  // loop in cases important for nested loops, and it also means we don't have
  // to split as many edges.
  BranchInst *PHBI = cast<BranchInst>(OrigPreheader->getTerminator());
  assert(PHBI->isConditional() && "Should be clone of BI condbr!");
  if (!isa<ConstantInt>(PHBI->getCondition()) ||
      PHBI->getSuccessor(cast<ConstantInt>(PHBI->getCondition())->isZero()) !=
          NewHeader) {
    // The conditional branch can't be folded, handle the general case.
    // Split edges as necessary to preserve LoopSimplify form.

    // Right now OrigPreHeader has two successors, NewHeader and ExitBlock, and
    // thus is not a preheader anymore.
    // Split the edge to form a real preheader.
    BasicBlock *NewPH = SplitCriticalEdge(
        OrigPreheader, NewHeader,
        CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA());
    NewPH->setName(NewHeader->getName() + ".lr.ph");

    // Preserve canonical loop form, which means that 'Exit' should have only
    // one predecessor. Note that Exit could be an exit block for multiple
    // nested loops, causing both of the edges to now be critical and need to
    // be split.
    SmallVector<BasicBlock *, 4> ExitPreds(pred_begin(Exit), pred_end(Exit));
    bool SplitLatchEdge = false;
    for (BasicBlock *ExitPred : ExitPreds) {
      // We only need to split loop exit edges.
      Loop *PredLoop = LI->getLoopFor(ExitPred);
      if (!PredLoop || PredLoop->contains(Exit))
        continue;
      if (isa<IndirectBrInst>(ExitPred->getTerminator()))
        continue;
      SplitLatchEdge |= L->getLoopLatch() == ExitPred;
      BasicBlock *ExitSplit = SplitCriticalEdge(
          ExitPred, Exit,
          CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA());
      ExitSplit->moveBefore(Exit);
    }
    assert(SplitLatchEdge &&
           "Despite splitting all preds, failed to split latch exit?");
  } else {
    // We can fold the conditional branch in the preheader, this makes things
    // simpler. The first step is to remove the extra edge to the Exit block.
    Exit->removePredecessor(OrigPreheader, true /*preserve LCSSA*/);
    BranchInst *NewBI = BranchInst::Create(NewHeader, PHBI);
    NewBI->setDebugLoc(PHBI->getDebugLoc());
    PHBI->eraseFromParent();

    // With our CFG finalized, update DomTree if it is available.
    if (DT) DT->deleteEdge(OrigPreheader, Exit);

    // Update MSSA too, if available.
    if (MSSAU)
      MSSAU->removeEdge(OrigPreheader, Exit);
  }

  assert(L->getLoopPreheader() && "Invalid loop preheader after loop rotation");
  assert(L->getLoopLatch() && "Invalid loop latch after loop rotation");

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Now that the CFG and DomTree are in a consistent state again, try to merge
  // the OrigHeader block into OrigLatch.  This will succeed if they are
  // connected by an unconditional branch.  This is just a cleanup so the
  // emitted code isn't too gross in this common case.
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
  MergeBlockIntoPredecessor(OrigHeader, &DTU, LI, MSSAU);

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  LLVM_DEBUG(dbgs() << "LoopRotation: into "; L->dump());

  ++NumRotated;
  return true;
}

/// Determine whether the instructions in this range may be safely and cheaply
/// speculated. This is not an important enough situation to develop complex
/// heuristics. We handle a single arithmetic instruction along with any type
/// conversions.
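///
/// For example (schematic): a range containing "%i.next = add %i, 1",
/// possibly together with trunc/zext/sext casts or constant-index GEPs, is
/// accepted; a second arithmetic instruction, or one whose operands are all
/// constants, is rejected.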
static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
                                  BasicBlock::iterator End, Loop *L) {
  bool seenIncrement = false;
  bool MultiExitLoop = false;

  if (!L->getExitingBlock())
    MultiExitLoop = true;

  for (BasicBlock::iterator I = Begin; I != End; ++I) {

    if (!isSafeToSpeculativelyExecute(&*I))
      return false;

    if (isa<DbgInfoIntrinsic>(I))
      continue;

    switch (I->getOpcode()) {
    default:
      return false;
    case Instruction::GetElementPtr:
      // GEPs are cheap if all indices are constant.
      if (!cast<GEPOperator>(I)->hasAllConstantIndices())
        return false;
      // fall-thru to increment case
      LLVM_FALLTHROUGH;
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr: {
      Value *IVOpnd =
          !isa<Constant>(I->getOperand(0))
              ? I->getOperand(0)
              : !isa<Constant>(I->getOperand(1)) ? I->getOperand(1) : nullptr;
      if (!IVOpnd)
        return false;

      // If increment operand is used outside of the loop, this speculation
      // could cause extra live range interference.
      if (MultiExitLoop) {
        for (User *UseI : IVOpnd->users()) {
          auto *UserInst = cast<Instruction>(UseI);
          if (!L->contains(UserInst))
            return false;
        }
      }

      if (seenIncrement)
        return false;
      seenIncrement = true;
      break;
    }
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
      // ignore type conversions
      break;
    }
  }
  return true;
}

/// Fold the loop tail into the loop exit by speculating the loop tail
/// instructions. Typically, this is a single post-increment. In the case of a
/// simple 2-block loop, hoisting the increment can be much better than
/// duplicating the entire loop header. In the case of loops with early exits,
/// rotation will not work anyway, but simplifyLoopLatch will put the loop in
/// canonical form so downstream passes can handle it.
///
/// I don't believe this invalidates SCEV.
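///
/// Schematically: given "exiting -> latch -> header", where the latch holds
/// only a post-increment followed by an unconditional branch, the increment
/// is speculated into the exiting block, that block is redirected to branch
/// back to the header, and the now-empty latch is deleted, making the
/// exiting block the new latch.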
bool LoopRotate::simplifyLoopLatch(Loop *L) {
  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || Latch->hasAddressTaken())
    return false;

  BranchInst *Jmp = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!Jmp || !Jmp->isUnconditional())
    return false;

  BasicBlock *LastExit = Latch->getSinglePredecessor();
  if (!LastExit || !L->isLoopExiting(LastExit))
    return false;

  BranchInst *BI = dyn_cast<BranchInst>(LastExit->getTerminator());
  if (!BI)
    return false;

  if (!shouldSpeculateInstrs(Latch->begin(), Jmp->getIterator(), L))
    return false;

  LLVM_DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into "
                    << LastExit->getName() << "\n");

  // Hoist the instructions from Latch into LastExit.
  Instruction *FirstLatchInst = &*(Latch->begin());
  LastExit->getInstList().splice(BI->getIterator(), Latch->getInstList(),
                                 Latch->begin(), Jmp->getIterator());

  // Update MemorySSA
  if (MSSAU)
    MSSAU->moveAllAfterMergeBlocks(Latch, LastExit, FirstLatchInst);

  unsigned FallThruPath = BI->getSuccessor(0) == Latch ? 0 : 1;
  BasicBlock *Header = Jmp->getSuccessor(0);
  assert(Header == L->getHeader() && "expected a backward branch");

  // Remove Latch from the CFG so that LastExit becomes the new Latch.
  BI->setSuccessor(FallThruPath, Header);
  Latch->replaceSuccessorsPhiUsesWith(LastExit);
  Jmp->eraseFromParent();

  // Nuke the Latch block.
  assert(Latch->empty() && "unable to evacuate Latch");
  LI->removeBlock(Latch);
  if (DT)
    DT->eraseNode(Latch);
  Latch->eraseFromParent();

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  return true;
}

/// Rotate \c L, and return true if any modification was made.
bool LoopRotate::processLoop(Loop *L) {
  // Save the loop metadata.
  MDNode *LoopMD = L->getLoopID();

  bool SimplifiedLatch = false;

  // Simplify the loop latch before attempting to rotate the header
  // upward. Rotation may not be needed if the loop tail can be folded into the
  // loop exit.
  if (!RotationOnly)
    SimplifiedLatch = simplifyLoopLatch(L);

  bool MadeChange = rotateLoop(L, SimplifiedLatch);
  assert((!MadeChange || L->isLoopExiting(L->getLoopLatch())) &&
         "Loop latch should be exiting after loop-rotate.");

  // Restore the loop metadata.
  // NB! We presume LoopRotation DOESN'T ADD its own metadata.
  if ((MadeChange || SimplifiedLatch) && LoopMD)
    L->setLoopID(LoopMD);

  return MadeChange || SimplifiedLatch;
}

/// The utility to convert a loop into a loop with bottom test.
bool llvm::LoopRotation(Loop *L, LoopInfo *LI, const TargetTransformInfo *TTI,
                        AssumptionCache *AC, DominatorTree *DT,
                        ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
                        const SimplifyQuery &SQ, bool RotationOnly = true,
                        unsigned Threshold = unsigned(-1),
                        bool IsUtilMode = true) {
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  LoopRotate LR(Threshold, LI, TTI, AC, DT, SE, MSSAU, SQ, RotationOnly,
                IsUtilMode);
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  return LR.processLoop(L);
}