1 //===- ModuloSchedule.cpp - Software pipeline schedule expansion ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "llvm/CodeGen/ModuloSchedule.h"
10 #include "llvm/ADT/StringExtras.h"
11 #include "llvm/Analysis/MemoryLocation.h"
12 #include "llvm/CodeGen/LiveIntervals.h"
13 #include "llvm/CodeGen/MachineInstrBuilder.h"
14 #include "llvm/CodeGen/MachineRegisterInfo.h"
15 #include "llvm/InitializePasses.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/Support/Debug.h"
18 #include "llvm/Support/ErrorHandling.h"
19 #include "llvm/Support/raw_ostream.h"
20 
21 #define DEBUG_TYPE "pipeliner"
22 using namespace llvm;
23 
24 void ModuloSchedule::print(raw_ostream &OS) {
25   for (MachineInstr *MI : ScheduledInstrs)
26     OS << "[stage " << getStage(MI) << " @" << getCycle(MI) << "c] " << *MI;
27 }
28 
29 //===----------------------------------------------------------------------===//
30 // ModuloScheduleExpander implementation
31 //===----------------------------------------------------------------------===//
32 
/// Return the register values for the operands of a Phi instruction.
/// This function assumes the instruction is a Phi.
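/// In MIR a Phi lists one (value, predecessor block) pair per incoming edge,
/// starting at operand 1; schematically (register and block names below are
/// only illustrative):
///
///   %phi = PHI %init, %preheader, %loopval, %loop
///
/// which is why this helper and the ones below walk the operands two at a
/// time.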
35 static void getPhiRegs(MachineInstr &Phi, MachineBasicBlock *Loop,
36                        unsigned &InitVal, unsigned &LoopVal) {
37   assert(Phi.isPHI() && "Expecting a Phi.");
38 
39   InitVal = 0;
40   LoopVal = 0;
41   for (unsigned i = 1, e = Phi.getNumOperands(); i != e; i += 2)
42     if (Phi.getOperand(i + 1).getMBB() != Loop)
43       InitVal = Phi.getOperand(i).getReg();
44     else
45       LoopVal = Phi.getOperand(i).getReg();
46 
47   assert(InitVal != 0 && LoopVal != 0 && "Unexpected Phi structure.");
48 }
49 
50 /// Return the Phi register value that comes from the incoming block.
51 static unsigned getInitPhiReg(MachineInstr &Phi, MachineBasicBlock *LoopBB) {
52   for (unsigned i = 1, e = Phi.getNumOperands(); i != e; i += 2)
53     if (Phi.getOperand(i + 1).getMBB() != LoopBB)
54       return Phi.getOperand(i).getReg();
55   return 0;
56 }
57 
/// Return the Phi register value that comes from the loop block.
59 static unsigned getLoopPhiReg(MachineInstr &Phi, MachineBasicBlock *LoopBB) {
60   for (unsigned i = 1, e = Phi.getNumOperands(); i != e; i += 2)
61     if (Phi.getOperand(i + 1).getMBB() == LoopBB)
62       return Phi.getOperand(i).getReg();
63   return 0;
64 }
65 
66 void ModuloScheduleExpander::expand() {
67   BB = Schedule.getLoop()->getTopBlock();
68   Preheader = *BB->pred_begin();
69   if (Preheader == BB)
70     Preheader = *std::next(BB->pred_begin());
71 
72   // Iterate over the definitions in each instruction, and compute the
73   // stage difference for each use.  Keep the maximum value.
74   for (MachineInstr *MI : Schedule.getInstructions()) {
75     int DefStage = Schedule.getStage(MI);
76     for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) {
77       MachineOperand &Op = MI->getOperand(i);
78       if (!Op.isReg() || !Op.isDef())
79         continue;
80 
81       Register Reg = Op.getReg();
82       unsigned MaxDiff = 0;
83       bool PhiIsSwapped = false;
84       for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(Reg),
85                                              EI = MRI.use_end();
86            UI != EI; ++UI) {
87         MachineOperand &UseOp = *UI;
88         MachineInstr *UseMI = UseOp.getParent();
89         int UseStage = Schedule.getStage(UseMI);
90         unsigned Diff = 0;
91         if (UseStage != -1 && UseStage >= DefStage)
92           Diff = UseStage - DefStage;
93         if (MI->isPHI()) {
94           if (isLoopCarried(*MI))
95             ++Diff;
96           else
97             PhiIsSwapped = true;
98         }
99         MaxDiff = std::max(Diff, MaxDiff);
100       }
101       RegToStageDiff[Reg] = std::make_pair(MaxDiff, PhiIsSwapped);
102     }
103   }
104 
105   generatePipelinedLoop();
106 }
107 
108 void ModuloScheduleExpander::generatePipelinedLoop() {
109   LoopInfo = TII->analyzeLoopForPipelining(BB);
110   assert(LoopInfo && "Must be able to analyze loop!");
111 
112   // Create a new basic block for the kernel and add it to the CFG.
113   MachineBasicBlock *KernelBB = MF.CreateMachineBasicBlock(BB->getBasicBlock());
114 
115   unsigned MaxStageCount = Schedule.getNumStages() - 1;
116 
117   // Remember the registers that are used in different stages. The index is
118   // the iteration, or stage, that the instruction is scheduled in.  This is
119   // a map between register names in the original block and the names created
120   // in each stage of the pipelined loop.
121   ValueMapTy *VRMap = new ValueMapTy[(MaxStageCount + 1) * 2];
122   InstrMapTy InstrMap;
123 
124   SmallVector<MachineBasicBlock *, 4> PrologBBs;
125 
126   // Generate the prolog instructions that set up the pipeline.
127   generateProlog(MaxStageCount, KernelBB, VRMap, PrologBBs);
128   MF.insert(BB->getIterator(), KernelBB);
129 
130   // Rearrange the instructions to generate the new, pipelined loop,
131   // and update register names as needed.
132   for (MachineInstr *CI : Schedule.getInstructions()) {
133     if (CI->isPHI())
134       continue;
135     unsigned StageNum = Schedule.getStage(CI);
136     MachineInstr *NewMI = cloneInstr(CI, MaxStageCount, StageNum);
137     updateInstruction(NewMI, false, MaxStageCount, StageNum, VRMap);
138     KernelBB->push_back(NewMI);
139     InstrMap[NewMI] = CI;
140   }
141 
142   // Copy any terminator instructions to the new kernel, and update
143   // names as needed.
144   for (MachineInstr &MI : BB->terminators()) {
145     MachineInstr *NewMI = MF.CloneMachineInstr(&MI);
146     updateInstruction(NewMI, false, MaxStageCount, 0, VRMap);
147     KernelBB->push_back(NewMI);
148     InstrMap[NewMI] = &MI;
149   }
150 
151   NewKernel = KernelBB;
152   KernelBB->transferSuccessors(BB);
153   KernelBB->replaceSuccessor(BB, KernelBB);
154 
155   generateExistingPhis(KernelBB, PrologBBs.back(), KernelBB, KernelBB, VRMap,
156                        InstrMap, MaxStageCount, MaxStageCount, false);
157   generatePhis(KernelBB, PrologBBs.back(), KernelBB, KernelBB, VRMap, InstrMap,
158                MaxStageCount, MaxStageCount, false);
159 
160   LLVM_DEBUG(dbgs() << "New block\n"; KernelBB->dump(););
161 
162   SmallVector<MachineBasicBlock *, 4> EpilogBBs;
163   // Generate the epilog instructions to complete the pipeline.
164   generateEpilog(MaxStageCount, KernelBB, VRMap, EpilogBBs, PrologBBs);
165 
166   // We need this step because the register allocation doesn't handle some
167   // situations well, so we insert copies to help out.
168   splitLifetimes(KernelBB, EpilogBBs);
169 
170   // Remove dead instructions due to loop induction variables.
171   removeDeadInstructions(KernelBB, EpilogBBs);
172 
173   // Add branches between prolog and epilog blocks.
174   addBranches(*Preheader, PrologBBs, KernelBB, EpilogBBs, VRMap);
175 
176   delete[] VRMap;
177 }
178 
179 void ModuloScheduleExpander::cleanup() {
180   // Remove the original loop since it's no longer referenced.
181   for (auto &I : *BB)
182     LIS.RemoveMachineInstrFromMaps(I);
183   BB->clear();
184   BB->eraseFromParent();
185 }
186 
187 /// Generate the pipeline prolog code.
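/// Prolog block i holds clones of the instructions scheduled in stages i
/// down to 0, so each successive prolog block starts one more iteration of
/// the original loop. As a rough sketch for a three-stage schedule (stage
/// numbers only; block names are illustrative):
///
///   prolog0: stage 0
///   prolog1: stages 1, 0
///   kernel : stages 2, 1, 0   (steady state)
///   epilog0: stage 2
///   epilog1: stages 1, 2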
188 void ModuloScheduleExpander::generateProlog(unsigned LastStage,
189                                             MachineBasicBlock *KernelBB,
190                                             ValueMapTy *VRMap,
191                                             MBBVectorTy &PrologBBs) {
192   MachineBasicBlock *PredBB = Preheader;
193   InstrMapTy InstrMap;
194 
195   // Generate a basic block for each stage, not including the last stage,
196   // which will be generated in the kernel. Each basic block may contain
197   // instructions from multiple stages/iterations.
198   for (unsigned i = 0; i < LastStage; ++i) {
199     // Create and insert the prolog basic block prior to the original loop
200     // basic block.  The original loop is removed later.
201     MachineBasicBlock *NewBB = MF.CreateMachineBasicBlock(BB->getBasicBlock());
202     PrologBBs.push_back(NewBB);
203     MF.insert(BB->getIterator(), NewBB);
204     NewBB->transferSuccessors(PredBB);
205     PredBB->addSuccessor(NewBB);
206     PredBB = NewBB;
207 
208     // Generate instructions for each appropriate stage. Process instructions
209     // in original program order.
210     for (int StageNum = i; StageNum >= 0; --StageNum) {
211       for (MachineBasicBlock::iterator BBI = BB->instr_begin(),
212                                        BBE = BB->getFirstTerminator();
213            BBI != BBE; ++BBI) {
214         if (Schedule.getStage(&*BBI) == StageNum) {
215           if (BBI->isPHI())
216             continue;
217           MachineInstr *NewMI =
218               cloneAndChangeInstr(&*BBI, i, (unsigned)StageNum);
219           updateInstruction(NewMI, false, i, (unsigned)StageNum, VRMap);
220           NewBB->push_back(NewMI);
221           InstrMap[NewMI] = &*BBI;
222         }
223       }
224     }
225     rewritePhiValues(NewBB, i, VRMap, InstrMap);
226     LLVM_DEBUG({
227       dbgs() << "prolog:\n";
228       NewBB->dump();
229     });
230   }
231 
232   PredBB->replaceSuccessor(BB, KernelBB);
233 
234   // Check if we need to remove the branch from the preheader to the original
235   // loop, and replace it with a branch to the new loop.
236   unsigned numBranches = TII->removeBranch(*Preheader);
237   if (numBranches) {
238     SmallVector<MachineOperand, 0> Cond;
239     TII->insertBranch(*Preheader, PrologBBs[0], nullptr, Cond, DebugLoc());
240   }
241 }
242 
243 /// Generate the pipeline epilog code. The epilog code finishes the iterations
244 /// that were started in either the prolog or the kernel.  We create a basic
245 /// block for each stage that needs to complete.
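/// The epilog block adjacent to the kernel finishes the oldest in-flight
/// iteration and so contains only the final stage; each following epilog
/// block runs progressively more stages to finish one more iteration (see
/// the sketch above generateProlog).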
246 void ModuloScheduleExpander::generateEpilog(unsigned LastStage,
247                                             MachineBasicBlock *KernelBB,
248                                             ValueMapTy *VRMap,
249                                             MBBVectorTy &EpilogBBs,
250                                             MBBVectorTy &PrologBBs) {
251   // We need to change the branch from the kernel to the first epilog block, so
252   // this call to analyze branch uses the kernel rather than the original BB.
253   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
254   SmallVector<MachineOperand, 4> Cond;
255   bool checkBranch = TII->analyzeBranch(*KernelBB, TBB, FBB, Cond);
256   assert(!checkBranch && "generateEpilog must be able to analyze the branch");
257   if (checkBranch)
258     return;
259 
260   MachineBasicBlock::succ_iterator LoopExitI = KernelBB->succ_begin();
261   if (*LoopExitI == KernelBB)
262     ++LoopExitI;
263   assert(LoopExitI != KernelBB->succ_end() && "Expecting a successor");
264   MachineBasicBlock *LoopExitBB = *LoopExitI;
265 
266   MachineBasicBlock *PredBB = KernelBB;
267   MachineBasicBlock *EpilogStart = LoopExitBB;
268   InstrMapTy InstrMap;
269 
270   // Generate a basic block for each stage, not including the last stage,
271   // which was generated for the kernel.  Each basic block may contain
272   // instructions from multiple stages/iterations.
273   int EpilogStage = LastStage + 1;
274   for (unsigned i = LastStage; i >= 1; --i, ++EpilogStage) {
275     MachineBasicBlock *NewBB = MF.CreateMachineBasicBlock();
276     EpilogBBs.push_back(NewBB);
277     MF.insert(BB->getIterator(), NewBB);
278 
279     PredBB->replaceSuccessor(LoopExitBB, NewBB);
280     NewBB->addSuccessor(LoopExitBB);
281 
282     if (EpilogStart == LoopExitBB)
283       EpilogStart = NewBB;
284 
285     // Add instructions to the epilog depending on the current block.
286     // Process instructions in original program order.
287     for (unsigned StageNum = i; StageNum <= LastStage; ++StageNum) {
288       for (auto &BBI : *BB) {
289         if (BBI.isPHI())
290           continue;
291         MachineInstr *In = &BBI;
292         if ((unsigned)Schedule.getStage(In) == StageNum) {
293           // Instructions with memoperands in the epilog are updated with
294           // conservative values.
295           MachineInstr *NewMI = cloneInstr(In, UINT_MAX, 0);
296           updateInstruction(NewMI, i == 1, EpilogStage, 0, VRMap);
297           NewBB->push_back(NewMI);
298           InstrMap[NewMI] = In;
299         }
300       }
301     }
302     generateExistingPhis(NewBB, PrologBBs[i - 1], PredBB, KernelBB, VRMap,
303                          InstrMap, LastStage, EpilogStage, i == 1);
304     generatePhis(NewBB, PrologBBs[i - 1], PredBB, KernelBB, VRMap, InstrMap,
305                  LastStage, EpilogStage, i == 1);
306     PredBB = NewBB;
307 
308     LLVM_DEBUG({
309       dbgs() << "epilog:\n";
310       NewBB->dump();
311     });
312   }
313 
314   // Fix any Phi nodes in the loop exit block.
315   LoopExitBB->replacePhiUsesWith(BB, PredBB);
316 
317   // Create a branch to the new epilog from the kernel.
318   // Remove the original branch and add a new branch to the epilog.
319   TII->removeBranch(*KernelBB);
320   TII->insertBranch(*KernelBB, KernelBB, EpilogStart, Cond, DebugLoc());
321   // Add a branch to the loop exit.
322   if (EpilogBBs.size() > 0) {
323     MachineBasicBlock *LastEpilogBB = EpilogBBs.back();
324     SmallVector<MachineOperand, 4> Cond1;
325     TII->insertBranch(*LastEpilogBB, LoopExitBB, nullptr, Cond1, DebugLoc());
326   }
327 }
328 
329 /// Replace all uses of FromReg that appear outside the specified
330 /// basic block with ToReg.
331 static void replaceRegUsesAfterLoop(unsigned FromReg, unsigned ToReg,
332                                     MachineBasicBlock *MBB,
333                                     MachineRegisterInfo &MRI,
334                                     LiveIntervals &LIS) {
335   for (MachineOperand &O :
336        llvm::make_early_inc_range(MRI.use_operands(FromReg)))
337     if (O.getParent()->getParent() != MBB)
338       O.setReg(ToReg);
339   if (!LIS.hasInterval(ToReg))
340     LIS.createEmptyInterval(ToReg);
341 }
342 
343 /// Return true if the register has a use that occurs outside the
344 /// specified loop.
345 static bool hasUseAfterLoop(unsigned Reg, MachineBasicBlock *BB,
346                             MachineRegisterInfo &MRI) {
347   for (MachineRegisterInfo::use_iterator I = MRI.use_begin(Reg),
348                                          E = MRI.use_end();
349        I != E; ++I)
350     if (I->getParent()->getParent() != BB)
351       return true;
352   return false;
353 }
354 
/// Generate Phis for the specified block in the generated pipelined code.
356 /// This function looks at the Phis from the original code to guide the
357 /// creation of new Phis.
358 void ModuloScheduleExpander::generateExistingPhis(
359     MachineBasicBlock *NewBB, MachineBasicBlock *BB1, MachineBasicBlock *BB2,
360     MachineBasicBlock *KernelBB, ValueMapTy *VRMap, InstrMapTy &InstrMap,
361     unsigned LastStageNum, unsigned CurStageNum, bool IsLast) {
  // Compute the stage number for the initial value of the Phi, which
  // comes from the prolog. The prolog to use depends on which kernel or
  // epilog block we're adding the Phi to.
365   unsigned PrologStage = 0;
366   unsigned PrevStage = 0;
367   bool InKernel = (LastStageNum == CurStageNum);
368   if (InKernel) {
369     PrologStage = LastStageNum - 1;
370     PrevStage = CurStageNum;
371   } else {
372     PrologStage = LastStageNum - (CurStageNum - LastStageNum);
373     PrevStage = LastStageNum + (CurStageNum - LastStageNum) - 1;
374   }
375 
376   for (MachineBasicBlock::iterator BBI = BB->instr_begin(),
377                                    BBE = BB->getFirstNonPHI();
378        BBI != BBE; ++BBI) {
379     Register Def = BBI->getOperand(0).getReg();
380 
381     unsigned InitVal = 0;
382     unsigned LoopVal = 0;
383     getPhiRegs(*BBI, BB, InitVal, LoopVal);
384 
385     unsigned PhiOp1 = 0;
386     // The Phi value from the loop body typically is defined in the loop, but
387     // not always. So, we need to check if the value is defined in the loop.
388     unsigned PhiOp2 = LoopVal;
389     if (VRMap[LastStageNum].count(LoopVal))
390       PhiOp2 = VRMap[LastStageNum][LoopVal];
391 
392     int StageScheduled = Schedule.getStage(&*BBI);
393     int LoopValStage = Schedule.getStage(MRI.getVRegDef(LoopVal));
394     unsigned NumStages = getStagesForReg(Def, CurStageNum);
395     if (NumStages == 0) {
396       // We don't need to generate a Phi anymore, but we need to rename any uses
397       // of the Phi value.
398       unsigned NewReg = VRMap[PrevStage][LoopVal];
399       rewriteScheduledInstr(NewBB, InstrMap, CurStageNum, 0, &*BBI, Def,
400                             InitVal, NewReg);
401       if (VRMap[CurStageNum].count(LoopVal))
402         VRMap[CurStageNum][Def] = VRMap[CurStageNum][LoopVal];
403     }
404     // Adjust the number of Phis needed depending on the number of prologs left,
405     // and the distance from where the Phi is first scheduled. The number of
406     // Phis cannot exceed the number of prolog stages. Each stage can
407     // potentially define two values.
408     unsigned MaxPhis = PrologStage + 2;
409     if (!InKernel && (int)PrologStage <= LoopValStage)
410       MaxPhis = std::max((int)MaxPhis - (int)LoopValStage, 1);
411     unsigned NumPhis = std::min(NumStages, MaxPhis);
412 
413     unsigned NewReg = 0;
414     unsigned AccessStage = (LoopValStage != -1) ? LoopValStage : StageScheduled;
415     // In the epilog, we may need to look back one stage to get the correct
416     // Phi name, because the epilog and prolog blocks execute the same stage.
417     // The correct name is from the previous block only when the Phi has
    // been completely scheduled prior to the epilog, and the Phi value is not
419     // needed in multiple stages.
420     int StageDiff = 0;
421     if (!InKernel && StageScheduled >= LoopValStage && AccessStage == 0 &&
422         NumPhis == 1)
423       StageDiff = 1;
424     // Adjust the computations below when the phi and the loop definition
425     // are scheduled in different stages.
426     if (InKernel && LoopValStage != -1 && StageScheduled > LoopValStage)
427       StageDiff = StageScheduled - LoopValStage;
428     for (unsigned np = 0; np < NumPhis; ++np) {
429       // If the Phi hasn't been scheduled, then use the initial Phi operand
430       // value. Otherwise, use the scheduled version of the instruction. This
431       // is a little complicated when a Phi references another Phi.
432       if (np > PrologStage || StageScheduled >= (int)LastStageNum)
433         PhiOp1 = InitVal;
434       // Check if the Phi has already been scheduled in a prolog stage.
435       else if (PrologStage >= AccessStage + StageDiff + np &&
436                VRMap[PrologStage - StageDiff - np].count(LoopVal) != 0)
437         PhiOp1 = VRMap[PrologStage - StageDiff - np][LoopVal];
438       // Check if the Phi has already been scheduled, but the loop instruction
439       // is either another Phi, or doesn't occur in the loop.
440       else if (PrologStage >= AccessStage + StageDiff + np) {
441         // If the Phi references another Phi, we need to examine the other
442         // Phi to get the correct value.
443         PhiOp1 = LoopVal;
444         MachineInstr *InstOp1 = MRI.getVRegDef(PhiOp1);
445         int Indirects = 1;
446         while (InstOp1 && InstOp1->isPHI() && InstOp1->getParent() == BB) {
447           int PhiStage = Schedule.getStage(InstOp1);
448           if ((int)(PrologStage - StageDiff - np) < PhiStage + Indirects)
449             PhiOp1 = getInitPhiReg(*InstOp1, BB);
450           else
451             PhiOp1 = getLoopPhiReg(*InstOp1, BB);
452           InstOp1 = MRI.getVRegDef(PhiOp1);
453           int PhiOpStage = Schedule.getStage(InstOp1);
454           int StageAdj = (PhiOpStage != -1 ? PhiStage - PhiOpStage : 0);
455           if (PhiOpStage != -1 && PrologStage - StageAdj >= Indirects + np &&
456               VRMap[PrologStage - StageAdj - Indirects - np].count(PhiOp1)) {
457             PhiOp1 = VRMap[PrologStage - StageAdj - Indirects - np][PhiOp1];
458             break;
459           }
460           ++Indirects;
461         }
462       } else
463         PhiOp1 = InitVal;
464       // If this references a generated Phi in the kernel, get the Phi operand
465       // from the incoming block.
466       if (MachineInstr *InstOp1 = MRI.getVRegDef(PhiOp1))
467         if (InstOp1->isPHI() && InstOp1->getParent() == KernelBB)
468           PhiOp1 = getInitPhiReg(*InstOp1, KernelBB);
469 
470       MachineInstr *PhiInst = MRI.getVRegDef(LoopVal);
471       bool LoopDefIsPhi = PhiInst && PhiInst->isPHI();
      // In the epilog, a map lookup is needed to get the value from the kernel
      // or a previous epilog block. Which one is used depends on whether the
      // instruction is scheduled in the previous block.
475       if (!InKernel) {
476         int StageDiffAdj = 0;
477         if (LoopValStage != -1 && StageScheduled > LoopValStage)
478           StageDiffAdj = StageScheduled - LoopValStage;
479         // Use the loop value defined in the kernel, unless the kernel
480         // contains the last definition of the Phi.
481         if (np == 0 && PrevStage == LastStageNum &&
482             (StageScheduled != 0 || LoopValStage != 0) &&
483             VRMap[PrevStage - StageDiffAdj].count(LoopVal))
484           PhiOp2 = VRMap[PrevStage - StageDiffAdj][LoopVal];
485         // Use the value defined by the Phi. We add one because we switch
486         // from looking at the loop value to the Phi definition.
487         else if (np > 0 && PrevStage == LastStageNum &&
488                  VRMap[PrevStage - np + 1].count(Def))
489           PhiOp2 = VRMap[PrevStage - np + 1][Def];
490         // Use the loop value defined in the kernel.
491         else if (static_cast<unsigned>(LoopValStage) > PrologStage + 1 &&
492                  VRMap[PrevStage - StageDiffAdj - np].count(LoopVal))
493           PhiOp2 = VRMap[PrevStage - StageDiffAdj - np][LoopVal];
494         // Use the value defined by the Phi, unless we're generating the first
495         // epilog and the Phi refers to a Phi in a different stage.
496         else if (VRMap[PrevStage - np].count(Def) &&
497                  (!LoopDefIsPhi || (PrevStage != LastStageNum) ||
498                   (LoopValStage == StageScheduled)))
499           PhiOp2 = VRMap[PrevStage - np][Def];
500       }
501 
502       // Check if we can reuse an existing Phi. This occurs when a Phi
503       // references another Phi, and the other Phi is scheduled in an
504       // earlier stage. We can try to reuse an existing Phi up until the last
505       // stage of the current Phi.
506       if (LoopDefIsPhi) {
507         if (static_cast<int>(PrologStage - np) >= StageScheduled) {
508           int LVNumStages = getStagesForPhi(LoopVal);
509           int StageDiff = (StageScheduled - LoopValStage);
510           LVNumStages -= StageDiff;
511           // Make sure the loop value Phi has been processed already.
512           if (LVNumStages > (int)np && VRMap[CurStageNum].count(LoopVal)) {
513             NewReg = PhiOp2;
514             unsigned ReuseStage = CurStageNum;
515             if (isLoopCarried(*PhiInst))
516               ReuseStage -= LVNumStages;
517             // Check if the Phi to reuse has been generated yet. If not, then
518             // there is nothing to reuse.
519             if (VRMap[ReuseStage - np].count(LoopVal)) {
520               NewReg = VRMap[ReuseStage - np][LoopVal];
521 
522               rewriteScheduledInstr(NewBB, InstrMap, CurStageNum, np, &*BBI,
523                                     Def, NewReg);
524               // Update the map with the new Phi name.
525               VRMap[CurStageNum - np][Def] = NewReg;
526               PhiOp2 = NewReg;
527               if (VRMap[LastStageNum - np - 1].count(LoopVal))
528                 PhiOp2 = VRMap[LastStageNum - np - 1][LoopVal];
529 
530               if (IsLast && np == NumPhis - 1)
531                 replaceRegUsesAfterLoop(Def, NewReg, BB, MRI, LIS);
532               continue;
533             }
534           }
535         }
536         if (InKernel && StageDiff > 0 &&
537             VRMap[CurStageNum - StageDiff - np].count(LoopVal))
538           PhiOp2 = VRMap[CurStageNum - StageDiff - np][LoopVal];
539       }
540 
541       const TargetRegisterClass *RC = MRI.getRegClass(Def);
542       NewReg = MRI.createVirtualRegister(RC);
543 
544       MachineInstrBuilder NewPhi =
545           BuildMI(*NewBB, NewBB->getFirstNonPHI(), DebugLoc(),
546                   TII->get(TargetOpcode::PHI), NewReg);
547       NewPhi.addReg(PhiOp1).addMBB(BB1);
548       NewPhi.addReg(PhiOp2).addMBB(BB2);
549       if (np == 0)
550         InstrMap[NewPhi] = &*BBI;
551 
552       // We define the Phis after creating the new pipelined code, so
553       // we need to rename the Phi values in scheduled instructions.
554 
555       unsigned PrevReg = 0;
556       if (InKernel && VRMap[PrevStage - np].count(LoopVal))
557         PrevReg = VRMap[PrevStage - np][LoopVal];
558       rewriteScheduledInstr(NewBB, InstrMap, CurStageNum, np, &*BBI, Def,
559                             NewReg, PrevReg);
560       // If the Phi has been scheduled, use the new name for rewriting.
561       if (VRMap[CurStageNum - np].count(Def)) {
562         unsigned R = VRMap[CurStageNum - np][Def];
563         rewriteScheduledInstr(NewBB, InstrMap, CurStageNum, np, &*BBI, R,
564                               NewReg);
565       }
566 
      // Check if we need to rename any uses that occur after the loop. The
568       // register to replace depends on whether the Phi is scheduled in the
569       // epilog.
570       if (IsLast && np == NumPhis - 1)
571         replaceRegUsesAfterLoop(Def, NewReg, BB, MRI, LIS);
572 
573       // In the kernel, a dependent Phi uses the value from this Phi.
574       if (InKernel)
575         PhiOp2 = NewReg;
576 
577       // Update the map with the new Phi name.
578       VRMap[CurStageNum - np][Def] = NewReg;
579     }
580 
581     while (NumPhis++ < NumStages) {
582       rewriteScheduledInstr(NewBB, InstrMap, CurStageNum, NumPhis, &*BBI, Def,
583                             NewReg, 0);
584     }
585 
586     // Check if we need to rename a Phi that has been eliminated due to
587     // scheduling.
588     if (NumStages == 0 && IsLast && VRMap[CurStageNum].count(LoopVal))
589       replaceRegUsesAfterLoop(Def, VRMap[CurStageNum][LoopVal], BB, MRI, LIS);
590   }
591 }
592 
593 /// Generate Phis for the specified block in the generated pipelined code.
594 /// These are new Phis needed because the definition is scheduled after the
595 /// use in the pipelined sequence.
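/// For example, if a register is defined by an instruction scheduled in
/// stage 0 and used by an instruction scheduled in stage 2, then in the
/// kernel the use reads the value produced two kernel iterations earlier,
/// which has to be carried through a chain of new Phis (a schematic
/// description of the general situation, not a target-specific case).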
596 void ModuloScheduleExpander::generatePhis(
597     MachineBasicBlock *NewBB, MachineBasicBlock *BB1, MachineBasicBlock *BB2,
598     MachineBasicBlock *KernelBB, ValueMapTy *VRMap, InstrMapTy &InstrMap,
599     unsigned LastStageNum, unsigned CurStageNum, bool IsLast) {
600   // Compute the stage number that contains the initial Phi value, and
601   // the Phi from the previous stage.
602   unsigned PrologStage = 0;
603   unsigned PrevStage = 0;
604   unsigned StageDiff = CurStageNum - LastStageNum;
605   bool InKernel = (StageDiff == 0);
606   if (InKernel) {
607     PrologStage = LastStageNum - 1;
608     PrevStage = CurStageNum;
609   } else {
610     PrologStage = LastStageNum - StageDiff;
611     PrevStage = LastStageNum + StageDiff - 1;
612   }
613 
614   for (MachineBasicBlock::iterator BBI = BB->getFirstNonPHI(),
615                                    BBE = BB->instr_end();
616        BBI != BBE; ++BBI) {
617     for (unsigned i = 0, e = BBI->getNumOperands(); i != e; ++i) {
618       MachineOperand &MO = BBI->getOperand(i);
619       if (!MO.isReg() || !MO.isDef() ||
620           !Register::isVirtualRegister(MO.getReg()))
621         continue;
622 
623       int StageScheduled = Schedule.getStage(&*BBI);
624       assert(StageScheduled != -1 && "Expecting scheduled instruction.");
625       Register Def = MO.getReg();
626       unsigned NumPhis = getStagesForReg(Def, CurStageNum);
      // An instruction that is scheduled in stage 0 and used after the loop
      // requires a phi in the epilog for the last definition from either
      // the kernel or prolog.
630       if (!InKernel && NumPhis == 0 && StageScheduled == 0 &&
631           hasUseAfterLoop(Def, BB, MRI))
632         NumPhis = 1;
633       if (!InKernel && (unsigned)StageScheduled > PrologStage)
634         continue;
635 
636       unsigned PhiOp2 = VRMap[PrevStage][Def];
637       if (MachineInstr *InstOp2 = MRI.getVRegDef(PhiOp2))
638         if (InstOp2->isPHI() && InstOp2->getParent() == NewBB)
639           PhiOp2 = getLoopPhiReg(*InstOp2, BB2);
640       // The number of Phis can't exceed the number of prolog stages. The
641       // prolog stage number is zero based.
642       if (NumPhis > PrologStage + 1 - StageScheduled)
643         NumPhis = PrologStage + 1 - StageScheduled;
644       for (unsigned np = 0; np < NumPhis; ++np) {
645         unsigned PhiOp1 = VRMap[PrologStage][Def];
646         if (np <= PrologStage)
647           PhiOp1 = VRMap[PrologStage - np][Def];
648         if (MachineInstr *InstOp1 = MRI.getVRegDef(PhiOp1)) {
649           if (InstOp1->isPHI() && InstOp1->getParent() == KernelBB)
650             PhiOp1 = getInitPhiReg(*InstOp1, KernelBB);
651           if (InstOp1->isPHI() && InstOp1->getParent() == NewBB)
652             PhiOp1 = getInitPhiReg(*InstOp1, NewBB);
653         }
654         if (!InKernel)
655           PhiOp2 = VRMap[PrevStage - np][Def];
656 
657         const TargetRegisterClass *RC = MRI.getRegClass(Def);
658         Register NewReg = MRI.createVirtualRegister(RC);
659 
660         MachineInstrBuilder NewPhi =
661             BuildMI(*NewBB, NewBB->getFirstNonPHI(), DebugLoc(),
662                     TII->get(TargetOpcode::PHI), NewReg);
663         NewPhi.addReg(PhiOp1).addMBB(BB1);
664         NewPhi.addReg(PhiOp2).addMBB(BB2);
665         if (np == 0)
666           InstrMap[NewPhi] = &*BBI;
667 
        // Rewrite uses and update the map. The actions depend upon whether
        // we're generating code for the kernel or epilog blocks.
670         if (InKernel) {
671           rewriteScheduledInstr(NewBB, InstrMap, CurStageNum, np, &*BBI, PhiOp1,
672                                 NewReg);
673           rewriteScheduledInstr(NewBB, InstrMap, CurStageNum, np, &*BBI, PhiOp2,
674                                 NewReg);
675 
676           PhiOp2 = NewReg;
677           VRMap[PrevStage - np - 1][Def] = NewReg;
678         } else {
679           VRMap[CurStageNum - np][Def] = NewReg;
680           if (np == NumPhis - 1)
681             rewriteScheduledInstr(NewBB, InstrMap, CurStageNum, np, &*BBI, Def,
682                                   NewReg);
683         }
684         if (IsLast && np == NumPhis - 1)
685           replaceRegUsesAfterLoop(Def, NewReg, BB, MRI, LIS);
686       }
687     }
688   }
689 }
690 
691 /// Remove instructions that generate values with no uses.
692 /// Typically, these are induction variable operations that generate values
693 /// used in the loop itself.  A dead instruction has a definition with
694 /// no uses, or uses that occur in the original loop only.
695 void ModuloScheduleExpander::removeDeadInstructions(MachineBasicBlock *KernelBB,
696                                                     MBBVectorTy &EpilogBBs) {
697   // For each epilog block, check that the value defined by each instruction
698   // is used.  If not, delete it.
699   for (MachineBasicBlock *MBB : llvm::reverse(EpilogBBs))
700     for (MachineBasicBlock::reverse_instr_iterator MI = MBB->instr_rbegin(),
701                                                    ME = MBB->instr_rend();
702          MI != ME;) {
      // From DeadMachineInstructionElim. Don't delete inline assembly.
704       if (MI->isInlineAsm()) {
705         ++MI;
706         continue;
707       }
708       bool SawStore = false;
709       // Check if it's safe to remove the instruction due to side effects.
710       // We can, and want to, remove Phis here.
711       if (!MI->isSafeToMove(nullptr, SawStore) && !MI->isPHI()) {
712         ++MI;
713         continue;
714       }
715       bool used = true;
716       for (const MachineOperand &MO : MI->operands()) {
717         if (!MO.isReg() || !MO.isDef())
718           continue;
719         Register reg = MO.getReg();
720         // Assume physical registers are used, unless they are marked dead.
721         if (Register::isPhysicalRegister(reg)) {
722           used = !MO.isDead();
723           if (used)
724             break;
725           continue;
726         }
727         unsigned realUses = 0;
728         for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(reg),
729                                                EI = MRI.use_end();
730              UI != EI; ++UI) {
          // A use counts as real only if it occurs outside the original
          // loop block; uses confined to the original loop don't count.
733           if (UI->getParent()->getParent() != BB) {
734             realUses++;
735             used = true;
736             break;
737           }
738         }
739         if (realUses > 0)
740           break;
741         used = false;
742       }
743       if (!used) {
744         LIS.RemoveMachineInstrFromMaps(*MI);
745         MI++->eraseFromParent();
746         continue;
747       }
748       ++MI;
749     }
750   // In the kernel block, check if we can remove a Phi that generates a value
751   // used in an instruction removed in the epilog block.
752   for (MachineInstr &MI : llvm::make_early_inc_range(KernelBB->phis())) {
753     Register reg = MI.getOperand(0).getReg();
754     if (MRI.use_begin(reg) == MRI.use_end()) {
755       LIS.RemoveMachineInstrFromMaps(MI);
756       MI.eraseFromParent();
757     }
758   }
759 }
760 
761 /// For loop carried definitions, we split the lifetime of a virtual register
762 /// that has uses past the definition in the next iteration. A copy with a new
763 /// virtual register is inserted before the definition, which helps with
764 /// generating a better register assignment.
765 ///
766 ///   v1 = phi(a, v2)     v1 = phi(a, v2)
767 ///   v2 = phi(b, v3)     v2 = phi(b, v3)
768 ///   v3 = ..             v4 = copy v1
///   .. = v1             v3 = ..
770 ///                       .. = v4
771 void ModuloScheduleExpander::splitLifetimes(MachineBasicBlock *KernelBB,
772                                             MBBVectorTy &EpilogBBs) {
773   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
774   for (auto &PHI : KernelBB->phis()) {
775     Register Def = PHI.getOperand(0).getReg();
    // Check for any Phi definition that is used as an operand of another Phi
    // in the same block.
778     for (MachineRegisterInfo::use_instr_iterator I = MRI.use_instr_begin(Def),
779                                                  E = MRI.use_instr_end();
780          I != E; ++I) {
781       if (I->isPHI() && I->getParent() == KernelBB) {
782         // Get the loop carried definition.
783         unsigned LCDef = getLoopPhiReg(PHI, KernelBB);
784         if (!LCDef)
785           continue;
786         MachineInstr *MI = MRI.getVRegDef(LCDef);
787         if (!MI || MI->getParent() != KernelBB || MI->isPHI())
788           continue;
789         // Search through the rest of the block looking for uses of the Phi
790         // definition. If one occurs, then split the lifetime.
791         unsigned SplitReg = 0;
792         for (auto &BBJ : make_range(MachineBasicBlock::instr_iterator(MI),
793                                     KernelBB->instr_end()))
794           if (BBJ.readsRegister(Def)) {
795             // We split the lifetime when we find the first use.
796             if (SplitReg == 0) {
797               SplitReg = MRI.createVirtualRegister(MRI.getRegClass(Def));
798               BuildMI(*KernelBB, MI, MI->getDebugLoc(),
799                       TII->get(TargetOpcode::COPY), SplitReg)
800                   .addReg(Def);
801             }
802             BBJ.substituteRegister(Def, SplitReg, 0, *TRI);
803           }
804         if (!SplitReg)
805           continue;
806         // Search through each of the epilog blocks for any uses to be renamed.
807         for (auto &Epilog : EpilogBBs)
808           for (auto &I : *Epilog)
809             if (I.readsRegister(Def))
810               I.substituteRegister(Def, SplitReg, 0, *TRI);
811         break;
812       }
813     }
814   }
815 }
816 
817 /// Remove the incoming block from the Phis in a basic block.
818 static void removePhis(MachineBasicBlock *BB, MachineBasicBlock *Incoming) {
819   for (MachineInstr &MI : *BB) {
820     if (!MI.isPHI())
821       break;
822     for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2)
823       if (MI.getOperand(i + 1).getMBB() == Incoming) {
824         MI.RemoveOperand(i + 1);
825         MI.RemoveOperand(i);
826         break;
827       }
828   }
829 }
830 
831 /// Create branches from each prolog basic block to the appropriate epilog
832 /// block.  These edges are needed if the loop ends before reaching the
833 /// kernel.
834 void ModuloScheduleExpander::addBranches(MachineBasicBlock &PreheaderBB,
835                                          MBBVectorTy &PrologBBs,
836                                          MachineBasicBlock *KernelBB,
837                                          MBBVectorTy &EpilogBBs,
838                                          ValueMapTy *VRMap) {
839   assert(PrologBBs.size() == EpilogBBs.size() && "Prolog/Epilog mismatch");
840   MachineBasicBlock *LastPro = KernelBB;
841   MachineBasicBlock *LastEpi = KernelBB;
842 
843   // Start from the blocks connected to the kernel and work "out"
844   // to the first prolog and the last epilog blocks.
845   SmallVector<MachineInstr *, 4> PrevInsts;
846   unsigned MaxIter = PrologBBs.size() - 1;
847   for (unsigned i = 0, j = MaxIter; i <= MaxIter; ++i, --j) {
848     // Add branches to the prolog that go to the corresponding
849     // epilog, and the fall-thru prolog/kernel block.
850     MachineBasicBlock *Prolog = PrologBBs[j];
851     MachineBasicBlock *Epilog = EpilogBBs[i];
852 
853     SmallVector<MachineOperand, 4> Cond;
854     Optional<bool> StaticallyGreater =
855         LoopInfo->createTripCountGreaterCondition(j + 1, *Prolog, Cond);
856     unsigned numAdded = 0;
857     if (!StaticallyGreater.hasValue()) {
858       Prolog->addSuccessor(Epilog);
859       numAdded = TII->insertBranch(*Prolog, Epilog, LastPro, Cond, DebugLoc());
860     } else if (*StaticallyGreater == false) {
861       Prolog->addSuccessor(Epilog);
862       Prolog->removeSuccessor(LastPro);
863       LastEpi->removeSuccessor(Epilog);
864       numAdded = TII->insertBranch(*Prolog, Epilog, nullptr, Cond, DebugLoc());
865       removePhis(Epilog, LastEpi);
866       // Remove the blocks that are no longer referenced.
867       if (LastPro != LastEpi) {
868         LastEpi->clear();
869         LastEpi->eraseFromParent();
870       }
871       if (LastPro == KernelBB) {
872         LoopInfo->disposed();
873         NewKernel = nullptr;
874       }
875       LastPro->clear();
876       LastPro->eraseFromParent();
877     } else {
878       numAdded = TII->insertBranch(*Prolog, LastPro, nullptr, Cond, DebugLoc());
879       removePhis(Epilog, Prolog);
880     }
881     LastPro = Prolog;
882     LastEpi = Epilog;
883     for (MachineBasicBlock::reverse_instr_iterator I = Prolog->instr_rbegin(),
884                                                    E = Prolog->instr_rend();
885          I != E && numAdded > 0; ++I, --numAdded)
886       updateInstruction(&*I, false, j, 0, VRMap);
887   }
888 
889   if (NewKernel) {
890     LoopInfo->setPreheader(PrologBBs[MaxIter]);
891     LoopInfo->adjustTripCount(-(MaxIter + 1));
892   }
893 }
894 
895 /// Return true if we can compute the amount the instruction changes
896 /// during each iteration. Set Delta to the amount of the change.
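/// For example, if the base address register of MI is advanced by a
/// post-increment of 4 on every iteration, Delta is set to 4. The actual
/// per-iteration increment is reported by TII->getIncrementValue; the
/// concrete value here is only illustrative.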
897 bool ModuloScheduleExpander::computeDelta(MachineInstr &MI, unsigned &Delta) {
898   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
899   const MachineOperand *BaseOp;
900   int64_t Offset;
901   bool OffsetIsScalable;
902   if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable, TRI))
903     return false;
904 
905   // FIXME: This algorithm assumes instructions have fixed-size offsets.
906   if (OffsetIsScalable)
907     return false;
908 
909   if (!BaseOp->isReg())
910     return false;
911 
912   Register BaseReg = BaseOp->getReg();
913 
914   MachineRegisterInfo &MRI = MF.getRegInfo();
915   // Check if there is a Phi. If so, get the definition in the loop.
916   MachineInstr *BaseDef = MRI.getVRegDef(BaseReg);
917   if (BaseDef && BaseDef->isPHI()) {
918     BaseReg = getLoopPhiReg(*BaseDef, MI.getParent());
919     BaseDef = MRI.getVRegDef(BaseReg);
920   }
921   if (!BaseDef)
922     return false;
923 
924   int D = 0;
925   if (!TII->getIncrementValue(*BaseDef, D) && D >= 0)
926     return false;
927 
928   Delta = D;
929   return true;
930 }
931 
932 /// Update the memory operand with a new offset when the pipeliner
933 /// generates a new copy of the instruction that refers to a
934 /// different memory location.
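/// A clone placed Num stages later than the original accesses memory that is
/// Delta * Num bytes further along, so each memory operand's offset is
/// adjusted by that amount; if the per-iteration change cannot be computed,
/// the memory operand is conservatively widened to an unknown size.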
935 void ModuloScheduleExpander::updateMemOperands(MachineInstr &NewMI,
936                                                MachineInstr &OldMI,
937                                                unsigned Num) {
938   if (Num == 0)
939     return;
940   // If the instruction has memory operands, then adjust the offset
941   // when the instruction appears in different stages.
942   if (NewMI.memoperands_empty())
943     return;
944   SmallVector<MachineMemOperand *, 2> NewMMOs;
945   for (MachineMemOperand *MMO : NewMI.memoperands()) {
946     // TODO: Figure out whether isAtomic is really necessary (see D57601).
947     if (MMO->isVolatile() || MMO->isAtomic() ||
948         (MMO->isInvariant() && MMO->isDereferenceable()) ||
949         (!MMO->getValue())) {
950       NewMMOs.push_back(MMO);
951       continue;
952     }
953     unsigned Delta;
954     if (Num != UINT_MAX && computeDelta(OldMI, Delta)) {
955       int64_t AdjOffset = Delta * Num;
956       NewMMOs.push_back(
957           MF.getMachineMemOperand(MMO, AdjOffset, MMO->getSize()));
958     } else {
959       NewMMOs.push_back(
960           MF.getMachineMemOperand(MMO, 0, MemoryLocation::UnknownSize));
961     }
962   }
963   NewMI.setMemRefs(MF, NewMMOs);
964 }
965 
966 /// Clone the instruction for the new pipelined loop and update the
967 /// memory operands, if needed.
968 MachineInstr *ModuloScheduleExpander::cloneInstr(MachineInstr *OldMI,
969                                                  unsigned CurStageNum,
970                                                  unsigned InstStageNum) {
971   MachineInstr *NewMI = MF.CloneMachineInstr(OldMI);
972   // Check for tied operands in inline asm instructions. This should be handled
973   // elsewhere, but I'm not sure of the best solution.
974   if (OldMI->isInlineAsm())
975     for (unsigned i = 0, e = OldMI->getNumOperands(); i != e; ++i) {
976       const auto &MO = OldMI->getOperand(i);
977       if (MO.isReg() && MO.isUse())
978         break;
979       unsigned UseIdx;
980       if (OldMI->isRegTiedToUseOperand(i, &UseIdx))
981         NewMI->tieOperands(i, UseIdx);
982     }
983   updateMemOperands(*NewMI, *OldMI, CurStageNum - InstStageNum);
984   return NewMI;
985 }
986 
987 /// Clone the instruction for the new pipelined loop. If needed, this
988 /// function updates the instruction using the values saved in the
989 /// InstrChanges structure.
990 MachineInstr *ModuloScheduleExpander::cloneAndChangeInstr(
991     MachineInstr *OldMI, unsigned CurStageNum, unsigned InstStageNum) {
992   MachineInstr *NewMI = MF.CloneMachineInstr(OldMI);
993   auto It = InstrChanges.find(OldMI);
994   if (It != InstrChanges.end()) {
995     std::pair<unsigned, int64_t> RegAndOffset = It->second;
996     unsigned BasePos, OffsetPos;
997     if (!TII->getBaseAndOffsetPosition(*OldMI, BasePos, OffsetPos))
998       return nullptr;
999     int64_t NewOffset = OldMI->getOperand(OffsetPos).getImm();
1000     MachineInstr *LoopDef = findDefInLoop(RegAndOffset.first);
1001     if (Schedule.getStage(LoopDef) > (signed)InstStageNum)
1002       NewOffset += RegAndOffset.second * (CurStageNum - InstStageNum);
1003     NewMI->getOperand(OffsetPos).setImm(NewOffset);
1004   }
1005   updateMemOperands(*NewMI, *OldMI, CurStageNum - InstStageNum);
1006   return NewMI;
1007 }
1008 
1009 /// Update the machine instruction with new virtual registers.  This
/// function may change the definitions and/or uses.
1011 void ModuloScheduleExpander::updateInstruction(MachineInstr *NewMI,
1012                                                bool LastDef,
1013                                                unsigned CurStageNum,
1014                                                unsigned InstrStageNum,
1015                                                ValueMapTy *VRMap) {
1016   for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
1017     MachineOperand &MO = NewMI->getOperand(i);
1018     if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg()))
1019       continue;
1020     Register reg = MO.getReg();
1021     if (MO.isDef()) {
1022       // Create a new virtual register for the definition.
1023       const TargetRegisterClass *RC = MRI.getRegClass(reg);
1024       Register NewReg = MRI.createVirtualRegister(RC);
1025       MO.setReg(NewReg);
1026       VRMap[CurStageNum][reg] = NewReg;
1027       if (LastDef)
1028         replaceRegUsesAfterLoop(reg, NewReg, BB, MRI, LIS);
1029     } else if (MO.isUse()) {
1030       MachineInstr *Def = MRI.getVRegDef(reg);
      // Compute the stage containing the last definition for the instruction.
1032       int DefStageNum = Schedule.getStage(Def);
1033       unsigned StageNum = CurStageNum;
1034       if (DefStageNum != -1 && (int)InstrStageNum > DefStageNum) {
        // Compute the difference in stages between the definition and the use.
1036         unsigned StageDiff = (InstrStageNum - DefStageNum);
1037         // Make an adjustment to get the last definition.
1038         StageNum -= StageDiff;
1039       }
1040       if (VRMap[StageNum].count(reg))
1041         MO.setReg(VRMap[StageNum][reg]);
1042     }
1043   }
1044 }
1045 
1046 /// Return the instruction in the loop that defines the register.
1047 /// If the definition is a Phi, then follow the Phi operand to
1048 /// the instruction in the loop.
1049 MachineInstr *ModuloScheduleExpander::findDefInLoop(unsigned Reg) {
1050   SmallPtrSet<MachineInstr *, 8> Visited;
1051   MachineInstr *Def = MRI.getVRegDef(Reg);
1052   while (Def->isPHI()) {
1053     if (!Visited.insert(Def).second)
1054       break;
1055     for (unsigned i = 1, e = Def->getNumOperands(); i < e; i += 2)
1056       if (Def->getOperand(i + 1).getMBB() == BB) {
1057         Def = MRI.getVRegDef(Def->getOperand(i).getReg());
1058         break;
1059       }
1060   }
1061   return Def;
1062 }
1063 
1064 /// Return the new name for the value from the previous stage.
1065 unsigned ModuloScheduleExpander::getPrevMapVal(
1066     unsigned StageNum, unsigned PhiStage, unsigned LoopVal, unsigned LoopStage,
1067     ValueMapTy *VRMap, MachineBasicBlock *BB) {
1068   unsigned PrevVal = 0;
1069   if (StageNum > PhiStage) {
1070     MachineInstr *LoopInst = MRI.getVRegDef(LoopVal);
1071     if (PhiStage == LoopStage && VRMap[StageNum - 1].count(LoopVal))
1072       // The name is defined in the previous stage.
1073       PrevVal = VRMap[StageNum - 1][LoopVal];
1074     else if (VRMap[StageNum].count(LoopVal))
1075       // The previous name is defined in the current stage when the instruction
1076       // order is swapped.
1077       PrevVal = VRMap[StageNum][LoopVal];
1078     else if (!LoopInst->isPHI() || LoopInst->getParent() != BB)
1079       // The loop value hasn't yet been scheduled.
1080       PrevVal = LoopVal;
1081     else if (StageNum == PhiStage + 1)
1082       // The loop value is another phi, which has not been scheduled.
1083       PrevVal = getInitPhiReg(*LoopInst, BB);
1084     else if (StageNum > PhiStage + 1 && LoopInst->getParent() == BB)
1085       // The loop value is another phi, which has been scheduled.
1086       PrevVal =
1087           getPrevMapVal(StageNum - 1, PhiStage, getLoopPhiReg(*LoopInst, BB),
1088                         LoopStage, VRMap, BB);
1089   }
1090   return PrevVal;
1091 }
1092 
1093 /// Rewrite the Phi values in the specified block to use the mappings
1094 /// from the initial operand. Once the Phi is scheduled, we switch
1095 /// to using the loop value instead of the Phi value, so those names
1096 /// do not need to be rewritten.
1097 void ModuloScheduleExpander::rewritePhiValues(MachineBasicBlock *NewBB,
1098                                               unsigned StageNum,
1099                                               ValueMapTy *VRMap,
1100                                               InstrMapTy &InstrMap) {
1101   for (auto &PHI : BB->phis()) {
1102     unsigned InitVal = 0;
1103     unsigned LoopVal = 0;
1104     getPhiRegs(PHI, BB, InitVal, LoopVal);
1105     Register PhiDef = PHI.getOperand(0).getReg();
1106 
1107     unsigned PhiStage = (unsigned)Schedule.getStage(MRI.getVRegDef(PhiDef));
1108     unsigned LoopStage = (unsigned)Schedule.getStage(MRI.getVRegDef(LoopVal));
1109     unsigned NumPhis = getStagesForPhi(PhiDef);
1110     if (NumPhis > StageNum)
1111       NumPhis = StageNum;
1112     for (unsigned np = 0; np <= NumPhis; ++np) {
1113       unsigned NewVal =
1114           getPrevMapVal(StageNum - np, PhiStage, LoopVal, LoopStage, VRMap, BB);
1115       if (!NewVal)
1116         NewVal = InitVal;
1117       rewriteScheduledInstr(NewBB, InstrMap, StageNum - np, np, &PHI, PhiDef,
1118                             NewVal);
1119     }
1120   }
1121 }
1122 
1123 /// Rewrite a previously scheduled instruction to use the register value
1124 /// from the new instruction. Make sure the instruction occurs in the
1125 /// basic block, and we don't change the uses in the new instruction.
1126 void ModuloScheduleExpander::rewriteScheduledInstr(
1127     MachineBasicBlock *BB, InstrMapTy &InstrMap, unsigned CurStageNum,
1128     unsigned PhiNum, MachineInstr *Phi, unsigned OldReg, unsigned NewReg,
1129     unsigned PrevReg) {
1130   bool InProlog = (CurStageNum < (unsigned)Schedule.getNumStages() - 1);
1131   int StagePhi = Schedule.getStage(Phi) + PhiNum;
1132   // Rewrite uses that have been scheduled already to use the new
1133   // Phi register.
1134   for (MachineOperand &UseOp :
1135        llvm::make_early_inc_range(MRI.use_operands(OldReg))) {
1136     MachineInstr *UseMI = UseOp.getParent();
1137     if (UseMI->getParent() != BB)
1138       continue;
1139     if (UseMI->isPHI()) {
1140       if (!Phi->isPHI() && UseMI->getOperand(0).getReg() == NewReg)
1141         continue;
1142       if (getLoopPhiReg(*UseMI, BB) != OldReg)
1143         continue;
1144     }
1145     InstrMapTy::iterator OrigInstr = InstrMap.find(UseMI);
1146     assert(OrigInstr != InstrMap.end() && "Instruction not scheduled.");
1147     MachineInstr *OrigMI = OrigInstr->second;
1148     int StageSched = Schedule.getStage(OrigMI);
1149     int CycleSched = Schedule.getCycle(OrigMI);
1150     unsigned ReplaceReg = 0;
1151     // This is the stage for the scheduled instruction.
1152     if (StagePhi == StageSched && Phi->isPHI()) {
1153       int CyclePhi = Schedule.getCycle(Phi);
1154       if (PrevReg && InProlog)
1155         ReplaceReg = PrevReg;
1156       else if (PrevReg && !isLoopCarried(*Phi) &&
1157                (CyclePhi <= CycleSched || OrigMI->isPHI()))
1158         ReplaceReg = PrevReg;
1159       else
1160         ReplaceReg = NewReg;
1161     }
1162     // The scheduled instruction occurs before the scheduled Phi, and the
1163     // Phi is not loop carried.
1164     if (!InProlog && StagePhi + 1 == StageSched && !isLoopCarried(*Phi))
1165       ReplaceReg = NewReg;
1166     if (StagePhi > StageSched && Phi->isPHI())
1167       ReplaceReg = NewReg;
1168     if (!InProlog && !Phi->isPHI() && StagePhi < StageSched)
1169       ReplaceReg = NewReg;
1170     if (ReplaceReg) {
1171       MRI.constrainRegClass(ReplaceReg, MRI.getRegClass(OldReg));
1172       UseOp.setReg(ReplaceReg);
1173     }
1174   }
1175 }
1176 
1177 bool ModuloScheduleExpander::isLoopCarried(MachineInstr &Phi) {
1178   if (!Phi.isPHI())
1179     return false;
1180   int DefCycle = Schedule.getCycle(&Phi);
1181   int DefStage = Schedule.getStage(&Phi);
1182 
1183   unsigned InitVal = 0;
1184   unsigned LoopVal = 0;
1185   getPhiRegs(Phi, Phi.getParent(), InitVal, LoopVal);
1186   MachineInstr *Use = MRI.getVRegDef(LoopVal);
1187   if (!Use || Use->isPHI())
1188     return true;
1189   int LoopCycle = Schedule.getCycle(Use);
1190   int LoopStage = Schedule.getStage(Use);
1191   return (LoopCycle > DefCycle) || (LoopStage <= DefStage);
1192 }
1193 
1194 //===----------------------------------------------------------------------===//
1195 // PeelingModuloScheduleExpander implementation
1196 //===----------------------------------------------------------------------===//
1197 // This is a reimplementation of ModuloScheduleExpander that works by creating
1198 // a fully correct steady-state kernel and peeling off the prolog and epilogs.
1199 //===----------------------------------------------------------------------===//
1200 
1201 namespace {
1202 // Remove any dead phis in MBB. Dead phis either have only one block as input
1203 // (in which case they are the identity) or have no uses.
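// For example, a single-input phi such as "%a = PHI %b, %bb.0" (schematic
// MIR; names are illustrative) is the identity and is removed by rewriting
// all uses of %a to %b.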
1204 void EliminateDeadPhis(MachineBasicBlock *MBB, MachineRegisterInfo &MRI,
1205                        LiveIntervals *LIS, bool KeepSingleSrcPhi = false) {
1206   bool Changed = true;
1207   while (Changed) {
1208     Changed = false;
1209     for (MachineInstr &MI : llvm::make_early_inc_range(MBB->phis())) {
1210       assert(MI.isPHI());
1211       if (MRI.use_empty(MI.getOperand(0).getReg())) {
1212         if (LIS)
1213           LIS->RemoveMachineInstrFromMaps(MI);
1214         MI.eraseFromParent();
1215         Changed = true;
1216       } else if (!KeepSingleSrcPhi && MI.getNumExplicitOperands() == 3) {
1217         MRI.constrainRegClass(MI.getOperand(1).getReg(),
1218                               MRI.getRegClass(MI.getOperand(0).getReg()));
1219         MRI.replaceRegWith(MI.getOperand(0).getReg(),
1220                            MI.getOperand(1).getReg());
1221         if (LIS)
1222           LIS->RemoveMachineInstrFromMaps(MI);
1223         MI.eraseFromParent();
1224         Changed = true;
1225       }
1226     }
1227   }
1228 }
1229 
1230 /// Rewrites the kernel block in-place to adhere to the given schedule.
1231 /// KernelRewriter holds all of the state required to perform the rewriting.
1232 class KernelRewriter {
1233   ModuloSchedule &S;
1234   MachineBasicBlock *BB;
1235   MachineBasicBlock *PreheaderBB, *ExitBB;
1236   MachineRegisterInfo &MRI;
1237   const TargetInstrInfo *TII;
1238   LiveIntervals *LIS;
1239 
1240   // Map from register class to canonical undef register for that class.
1241   DenseMap<const TargetRegisterClass *, Register> Undefs;
1242   // Map from <LoopReg, InitReg> to phi register for all created phis. Note that
1243   // this map is only used when InitReg is non-undef.
1244   DenseMap<std::pair<unsigned, unsigned>, Register> Phis;
1245   // Map from LoopReg to phi register where the InitReg is undef.
1246   DenseMap<Register, Register> UndefPhis;
1247 
1248   // Reg is used by MI. Return the new register MI should use to adhere to the
1249   // schedule. Insert phis as necessary.
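  // For example, a consumer scheduled two stages after its producer needs the
  // value from two kernel iterations back, so two phis are inserted between
  // them (an illustrative case, not tied to a particular target).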
1250   Register remapUse(Register Reg, MachineInstr &MI);
1251   // Insert a phi that carries LoopReg from the loop body and InitReg otherwise.
1252   // If InitReg is not given it is chosen arbitrarily. It will either be undef
1253   // or will be chosen so as to share another phi.
1254   Register phi(Register LoopReg, Optional<Register> InitReg = {},
1255                const TargetRegisterClass *RC = nullptr);
1256   // Create an undef register of the given register class.
1257   Register undef(const TargetRegisterClass *RC);
1258 
1259 public:
1260   KernelRewriter(MachineLoop &L, ModuloSchedule &S, MachineBasicBlock *LoopBB,
1261                  LiveIntervals *LIS = nullptr);
1262   void rewrite();
1263 };
1264 } // namespace
1265 
1266 KernelRewriter::KernelRewriter(MachineLoop &L, ModuloSchedule &S,
1267                                MachineBasicBlock *LoopBB, LiveIntervals *LIS)
1268     : S(S), BB(LoopBB), PreheaderBB(L.getLoopPreheader()),
1269       ExitBB(L.getExitBlock()), MRI(BB->getParent()->getRegInfo()),
1270       TII(BB->getParent()->getSubtarget().getInstrInfo()), LIS(LIS) {
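  // Prefer the current CFG predecessor over MachineLoopInfo's notion of the
  // preheader: by the time the rewriter runs (for example on the
  // -pipeliner-experimental-cg validation path, after a previous expansion)
  // the recorded preheader may be stale, so it is re-derived from BB's
  // predecessors here.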
1271   PreheaderBB = *BB->pred_begin();
1272   if (PreheaderBB == BB)
1273     PreheaderBB = *std::next(BB->pred_begin());
1274 }
1275 
1276 void KernelRewriter::rewrite() {
1277   // Rearrange the loop to be in schedule order. Note that the schedule may
1278   // contain instructions that are not owned by the loop block (InstrChanges and
1279   // friends), so we gracefully handle unowned instructions and delete any
1280   // instructions that weren't in the schedule.
1281   auto InsertPt = BB->getFirstTerminator();
1282   MachineInstr *FirstMI = nullptr;
1283   for (MachineInstr *MI : S.getInstructions()) {
1284     if (MI->isPHI())
1285       continue;
1286     if (MI->getParent())
1287       MI->removeFromParent();
1288     BB->insert(InsertPt, MI);
1289     if (!FirstMI)
1290       FirstMI = MI;
1291   }
1292   assert(FirstMI && "Failed to find first MI in schedule");
1293 
1294   // At this point all of the scheduled instructions are between FirstMI
1295   // and the end of the block. Kill from the first non-phi to FirstMI.
1296   for (auto I = BB->getFirstNonPHI(); I != FirstMI->getIterator();) {
1297     if (LIS)
1298       LIS->RemoveMachineInstrFromMaps(*I);
1299     (I++)->eraseFromParent();
1300   }
1301 
1302   // Now remap every instruction in the loop.
1303   for (MachineInstr &MI : *BB) {
1304     if (MI.isPHI() || MI.isTerminator())
1305       continue;
1306     for (MachineOperand &MO : MI.uses()) {
1307       if (!MO.isReg() || MO.getReg().isPhysical() || MO.isImplicit())
1308         continue;
1309       Register Reg = remapUse(MO.getReg(), MI);
1310       MO.setReg(Reg);
1311     }
1312   }
1313   EliminateDeadPhis(BB, MRI, LIS);
1314 
1315   // Ensure a phi exists for all instructions that are either referenced by
1316   // an illegal phi or by an instruction outside the loop. This allows us to
1317   // treat remaps of these values the same as "normal" values that come from
1318   // loop-carried phis.
1319   for (auto MI = BB->getFirstNonPHI(); MI != BB->end(); ++MI) {
1320     if (MI->isPHI()) {
1321       Register R = MI->getOperand(0).getReg();
1322       phi(R);
1323       continue;
1324     }
1325 
1326     for (MachineOperand &Def : MI->defs()) {
1327       for (MachineInstr &UseMI : MRI.use_instructions(Def.getReg())) {
1328         if (UseMI.getParent() != BB) {
1329           phi(Def.getReg());
1330           break;
1331         }
1332       }
1333     }
1334   }
1335 }
1336 
1337 Register KernelRewriter::remapUse(Register Reg, MachineInstr &MI) {
1338   MachineInstr *Producer = MRI.getUniqueVRegDef(Reg);
1339   if (!Producer)
1340     return Reg;
1341 
1342   int ConsumerStage = S.getStage(&MI);
1343   if (!Producer->isPHI()) {
1344     // Non-phi producers are simple to remap. Insert as many phis as the
1345     // difference between the consumer and producer stages.
1346     if (Producer->getParent() != BB)
1347       // Producer was not inside the loop. Use the register as-is.
1348       return Reg;
1349     int ProducerStage = S.getStage(Producer);
1350     assert(ConsumerStage != -1 &&
1351            "In-loop consumer should always be scheduled!");
1352     assert(ConsumerStage >= ProducerStage);
1353     unsigned StageDiff = ConsumerStage - ProducerStage;
1354 
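    // For example (the stage numbers are illustrative): a value produced in
    // stage 0 and consumed in stage 2 gets two phis chained in front of the
    // consumer, so the consumer reads the copy of the value produced two
    // kernel iterations earlier.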
1355     for (unsigned I = 0; I < StageDiff; ++I)
1356       Reg = phi(Reg);
1357     return Reg;
1358   }
1359 
1360   // First, dive through the phi chain to find the defaults for the generated
1361   // phis.
1362   SmallVector<Optional<Register>, 4> Defaults;
1363   Register LoopReg = Reg;
1364   auto LoopProducer = Producer;
1365   while (LoopProducer->isPHI() && LoopProducer->getParent() == BB) {
1366     LoopReg = getLoopPhiReg(*LoopProducer, BB);
1367     Defaults.emplace_back(getInitPhiReg(*LoopProducer, BB));
1368     LoopProducer = MRI.getUniqueVRegDef(LoopReg);
1369     assert(LoopProducer);
1370   }
1371   int LoopProducerStage = S.getStage(LoopProducer);
1372 
1373   Optional<Register> IllegalPhiDefault;
1374 
1375   if (LoopProducerStage == -1) {
1376     // Do nothing.
1377   } else if (LoopProducerStage > ConsumerStage) {
1378     // This schedule is only representable if ProducerStage == ConsumerStage+1.
1379     // In addition, the consumer must not be scheduled at an earlier cycle than
1380     // the producer in the rescheduled loop. This is enforced by the pipeliner's
1381     // ASAP and ALAP functions.
1382 #ifndef NDEBUG // Silence unused variables in non-asserts mode.
1383     int LoopProducerCycle = S.getCycle(LoopProducer);
1384     int ConsumerCycle = S.getCycle(&MI);
1385 #endif
1386     assert(LoopProducerCycle <= ConsumerCycle);
1387     assert(LoopProducerStage == ConsumerStage + 1);
1388     // Peel off the first phi from Defaults and insert a phi between producer
1389     // and consumer. This phi will not be at the front of the block so we
1390     // consider it illegal. It will only exist during the rewrite process; it
1391     // needs to exist while we peel off prologs because these could take the
1392     // default value. After that we can replace all uses with the loop producer
1393     // value.
1394     IllegalPhiDefault = Defaults.front();
1395     Defaults.erase(Defaults.begin());
1396   } else {
1397     assert(ConsumerStage >= LoopProducerStage);
1398     int StageDiff = ConsumerStage - LoopProducerStage;
1399     if (StageDiff > 0) {
1400       LLVM_DEBUG(dbgs() << " -- padding defaults array from " << Defaults.size()
1401                         << " to " << (Defaults.size() + StageDiff) << "\n");
1402       // If we need more phis than we have defaults for, pad out the earliest
1403       // phis (at the end of the chain, which is in reverse order) by repeating
1404       // the last default, or with undef if there are no defaults at all.
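      // For example (purely illustrative): a single recorded default [d] padded
      // by a StageDiff of two becomes [d, d, d], so the two extra (earliest)
      // phis also take d as their initial value.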
1405       Defaults.resize(Defaults.size() + StageDiff, Defaults.empty()
1406                                                        ? Optional<Register>()
1407                                                        : Defaults.back());
1408     }
1409   }
1410 
1411   // Now we know the number of stages to jump back, insert the phi chain.
1412   auto DefaultI = Defaults.rbegin();
1413   while (DefaultI != Defaults.rend())
1414     LoopReg = phi(LoopReg, *DefaultI++, MRI.getRegClass(Reg));
1415 
1416   if (IllegalPhiDefault.hasValue()) {
1417     // The consumer optionally consumes LoopProducer in the same iteration
1418     // (because the producer is scheduled at an earlier cycle than the consumer)
1419     // or the initial value. To facilitate this we create an illegal phi here,
1420     // embedded in the middle of the block (which is what makes it illegal). We
1421     // will fix this up immediately prior to pruning.
1422     auto RC = MRI.getRegClass(Reg);
1423     Register R = MRI.createVirtualRegister(RC);
1424     MachineInstr *IllegalPhi =
1425         BuildMI(*BB, MI, DebugLoc(), TII->get(TargetOpcode::PHI), R)
1426             .addReg(IllegalPhiDefault.getValue())
1427             .addMBB(PreheaderBB) // Block choice is arbitrary and has no effect.
1428             .addReg(LoopReg)
1429             .addMBB(BB); // Block choice is arbitrary and has no effect.
1430     // Illegal phi should belong to the producer stage so that it can be
1431     // filtered correctly during peeling.
1432     S.setStage(IllegalPhi, LoopProducerStage);
1433     return R;
1434   }
1435 
1436   return LoopReg;
1437 }
1438 
1439 Register KernelRewriter::phi(Register LoopReg, Optional<Register> InitReg,
1440                              const TargetRegisterClass *RC) {
1441   // If the init register is not undef, try and find an existing phi.
1442   if (InitReg.hasValue()) {
1443     auto I = Phis.find({LoopReg, InitReg.getValue()});
1444     if (I != Phis.end())
1445       return I->second;
1446   } else {
1447     for (auto &KV : Phis) {
1448       if (KV.first.first == LoopReg)
1449         return KV.second;
1450     }
1451   }
1452 
1453   // InitReg is either undef or no existing phi takes InitReg as input. Try and
1454   // find a phi that takes undef as input.
1455   auto I = UndefPhis.find(LoopReg);
1456   if (I != UndefPhis.end()) {
1457     Register R = I->second;
1458     if (!InitReg.hasValue())
1459       // Found a phi taking undef as input, and this input is undef so return
1460       // without any more changes.
1461       return R;
1462     // Found a phi taking undef as input, so rewrite it to take InitReg.
1463     MachineInstr *MI = MRI.getVRegDef(R);
1464     MI->getOperand(1).setReg(InitReg.getValue());
1465     Phis.insert({{LoopReg, InitReg.getValue()}, R});
1466     MRI.constrainRegClass(R, MRI.getRegClass(InitReg.getValue()));
1467     UndefPhis.erase(I);
1468     return R;
1469   }
1470 
1471   // Failed to find any existing phi to reuse, so create a new one.
1472   if (!RC)
1473     RC = MRI.getRegClass(LoopReg);
1474   Register R = MRI.createVirtualRegister(RC);
1475   if (InitReg.hasValue())
1476     MRI.constrainRegClass(R, MRI.getRegClass(*InitReg));
1477   BuildMI(*BB, BB->getFirstNonPHI(), DebugLoc(), TII->get(TargetOpcode::PHI), R)
1478       .addReg(InitReg.hasValue() ? *InitReg : undef(RC))
1479       .addMBB(PreheaderBB)
1480       .addReg(LoopReg)
1481       .addMBB(BB);
1482   if (!InitReg.hasValue())
1483     UndefPhis[LoopReg] = R;
1484   else
1485     Phis[{LoopReg, *InitReg}] = R;
1486   return R;
1487 }
1488 
1489 Register KernelRewriter::undef(const TargetRegisterClass *RC) {
1490   Register &R = Undefs[RC];
1491   if (R == 0) {
1492     // Create an IMPLICIT_DEF that defines this register if we need it.
1493     // All uses of this should be removed by the time we have finished unrolling
1494     // prologs and epilogs.
1495     R = MRI.createVirtualRegister(RC);
1496     auto *InsertBB = &PreheaderBB->getParent()->front();
1497     BuildMI(*InsertBB, InsertBB->getFirstTerminator(), DebugLoc(),
1498             TII->get(TargetOpcode::IMPLICIT_DEF), R);
1499   }
1500   return R;
1501 }
1502 
1503 namespace {
1504 /// Describes an operand in the kernel of a pipelined loop. Characteristics of
1505 /// the operand are discovered, such as how many in-loop PHIs it has to look
1506 /// through and the default (initial) values of those phis.
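/// For example (hypothetical registers): for a use of %a, where
///   %a = PHI %init, %bb.preheader, %b, %bb.loop
/// and %b is defined by a non-phi instruction inside the loop, the operand has
/// a distance of one phi with %init as its default.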
1507 class KernelOperandInfo {
1508   MachineBasicBlock *BB;
1509   MachineRegisterInfo &MRI;
1510   SmallVector<Register, 4> PhiDefaults;
1511   MachineOperand *Source;
1512   MachineOperand *Target;
1513 
1514 public:
1515   KernelOperandInfo(MachineOperand *MO, MachineRegisterInfo &MRI,
1516                     const SmallPtrSetImpl<MachineInstr *> &IllegalPhis)
1517       : MRI(MRI) {
1518     Source = MO;
1519     BB = MO->getParent()->getParent();
1520     while (isRegInLoop(MO)) {
1521       MachineInstr *MI = MRI.getVRegDef(MO->getReg());
1522       if (MI->isFullCopy()) {
1523         MO = &MI->getOperand(1);
1524         continue;
1525       }
1526       if (!MI->isPHI())
1527         break;
1528       // If this is an illegal phi, don't count it in distance.
1529       if (IllegalPhis.count(MI)) {
1530         MO = &MI->getOperand(3);
1531         continue;
1532       }
1533 
1534       Register Default = getInitPhiReg(*MI, BB);
1535       MO = MI->getOperand(2).getMBB() == BB ? &MI->getOperand(1)
1536                                             : &MI->getOperand(3);
1537       PhiDefaults.push_back(Default);
1538     }
1539     Target = MO;
1540   }
1541 
1542   bool operator==(const KernelOperandInfo &Other) const {
1543     return PhiDefaults.size() == Other.PhiDefaults.size();
1544   }
1545 
1546   void print(raw_ostream &OS) const {
1547     OS << "use of " << *Source << ": distance(" << PhiDefaults.size() << ") in "
1548        << *Source->getParent();
1549   }
1550 
1551 private:
1552   bool isRegInLoop(MachineOperand *MO) {
1553     return MO->isReg() && MO->getReg().isVirtual() &&
1554            MRI.getVRegDef(MO->getReg())->getParent() == BB;
1555   }
1556 };
1557 } // namespace
1558 
1559 MachineBasicBlock *
1560 PeelingModuloScheduleExpander::peelKernel(LoopPeelDirection LPD) {
1561   MachineBasicBlock *NewBB = PeelSingleBlockLoop(LPD, BB, MRI, TII);
1562   if (LPD == LPD_Front)
1563     PeeledFront.push_back(NewBB);
1564   else
1565     PeeledBack.push_front(NewBB);
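  // Record how the peeled copies relate to the kernel: CanonicalMIs maps every
  // clone (and each kernel instruction itself) back to its kernel instruction,
  // and BlockMIs maps a (block, kernel instruction) pair to the copy of that
  // instruction living in the block.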
1566   for (auto I = BB->begin(), NI = NewBB->begin(); !I->isTerminator();
1567        ++I, ++NI) {
1568     CanonicalMIs[&*I] = &*I;
1569     CanonicalMIs[&*NI] = &*I;
1570     BlockMIs[{NewBB, &*I}] = &*NI;
1571     BlockMIs[{BB, &*I}] = &*I;
1572   }
1573   return NewBB;
1574 }
1575 
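/// Remove from MB all instructions whose stage is known and below MinStage.
/// Any PHI that consumed a result of a removed instruction is first rewired to
/// the corresponding register in MB, so no dangling uses remain.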
1576 void PeelingModuloScheduleExpander::filterInstructions(MachineBasicBlock *MB,
1577                                                        int MinStage) {
1578   for (auto I = MB->getFirstInstrTerminator()->getReverseIterator();
1579        I != std::next(MB->getFirstNonPHI()->getReverseIterator());) {
1580     MachineInstr *MI = &*I++;
1581     int Stage = getStage(MI);
1582     if (Stage == -1 || Stage >= MinStage)
1583       continue;
1584 
1585     for (MachineOperand &DefMO : MI->defs()) {
1586       SmallVector<std::pair<MachineInstr *, Register>, 4> Subs;
1587       for (MachineInstr &UseMI : MRI.use_instructions(DefMO.getReg())) {
1588         // Only PHIs can use values from this block by construction.
1589         // Match with the equivalent PHI in B.
1590         assert(UseMI.isPHI());
1591         Register Reg = getEquivalentRegisterIn(UseMI.getOperand(0).getReg(),
1592                                                MI->getParent());
1593         Subs.emplace_back(&UseMI, Reg);
1594       }
1595       for (auto &Sub : Subs)
1596         Sub.first->substituteRegister(DefMO.getReg(), Sub.second, /*SubIdx=*/0,
1597                                       *MRI.getTargetRegisterInfo());
1598     }
1599     if (LIS)
1600       LIS->RemoveMachineInstrFromMaps(*MI);
1601     MI->eraseFromParent();
1602   }
1603 }
1604 
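/// Move all instructions belonging to Stage from SourceBB into DestBB,
/// inserting or cloning PHIs as needed so that values flowing between the two
/// blocks (and through illegal PHIs) remain well formed, and removing phis
/// made redundant by the move.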
1605 void PeelingModuloScheduleExpander::moveStageBetweenBlocks(
1606     MachineBasicBlock *DestBB, MachineBasicBlock *SourceBB, unsigned Stage) {
1607   auto InsertPt = DestBB->getFirstNonPHI();
1608   DenseMap<Register, Register> Remaps;
1609   for (MachineInstr &MI : llvm::make_early_inc_range(
1610            llvm::make_range(SourceBB->getFirstNonPHI(), SourceBB->end()))) {
1611     if (MI.isPHI()) {
1612       // This is an illegal PHI. If we move any instructions using an illegal
1613       // PHI, we need to create a legal Phi.
1614       if (getStage(&MI) != Stage) {
1615         // The legal Phi is not necessary if the illegal phi's stage
1616         // is being moved.
1617         Register PhiR = MI.getOperand(0).getReg();
1618         auto RC = MRI.getRegClass(PhiR);
1619         Register NR = MRI.createVirtualRegister(RC);
1620         MachineInstr *NI = BuildMI(*DestBB, DestBB->getFirstNonPHI(),
1621                                    DebugLoc(), TII->get(TargetOpcode::PHI), NR)
1622                                .addReg(PhiR)
1623                                .addMBB(SourceBB);
1624         BlockMIs[{DestBB, CanonicalMIs[&MI]}] = NI;
1625         CanonicalMIs[NI] = CanonicalMIs[&MI];
1626         Remaps[PhiR] = NR;
1627       }
1628     }
1629     if (getStage(&MI) != Stage)
1630       continue;
1631     MI.removeFromParent();
1632     DestBB->insert(InsertPt, &MI);
1633     auto *KernelMI = CanonicalMIs[&MI];
1634     BlockMIs[{DestBB, KernelMI}] = &MI;
1635     BlockMIs.erase({SourceBB, KernelMI});
1636   }
1637   SmallVector<MachineInstr *, 4> PhiToDelete;
1638   for (MachineInstr &MI : DestBB->phis()) {
1639     assert(MI.getNumOperands() == 3);
1640     MachineInstr *Def = MRI.getVRegDef(MI.getOperand(1).getReg());
1641     // If the instruction referenced by the phi has been moved into this block,
1642     // we don't need the phi anymore.
1643     if (getStage(Def) == Stage) {
1644       Register PhiReg = MI.getOperand(0).getReg();
1645       assert(Def->findRegisterDefOperandIdx(MI.getOperand(1).getReg()) != -1);
1646       MRI.replaceRegWith(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
1647       MI.getOperand(0).setReg(PhiReg);
1648       PhiToDelete.push_back(&MI);
1649     }
1650   }
1651   for (auto *P : PhiToDelete)
1652     P->eraseFromParent();
1653   InsertPt = DestBB->getFirstNonPHI();
1654   // Helper to clone Phi instructions into the destination block. We clone Phis
1655   // greedily to avoid a combinatorial explosion of Phi instructions.
1656   auto clonePhi = [&](MachineInstr *Phi) {
1657     MachineInstr *NewMI = MF.CloneMachineInstr(Phi);
1658     DestBB->insert(InsertPt, NewMI);
1659     Register OrigR = Phi->getOperand(0).getReg();
1660     Register R = MRI.createVirtualRegister(MRI.getRegClass(OrigR));
1661     NewMI->getOperand(0).setReg(R);
1662     NewMI->getOperand(1).setReg(OrigR);
1663     NewMI->getOperand(2).setMBB(*DestBB->pred_begin());
1664     Remaps[OrigR] = R;
1665     CanonicalMIs[NewMI] = CanonicalMIs[Phi];
1666     BlockMIs[{DestBB, CanonicalMIs[Phi]}] = NewMI;
1667     PhiNodeLoopIteration[NewMI] = PhiNodeLoopIteration[Phi];
1668     return R;
1669   };
1670   for (auto I = DestBB->getFirstNonPHI(); I != DestBB->end(); ++I) {
1671     for (MachineOperand &MO : I->uses()) {
1672       if (!MO.isReg())
1673         continue;
1674       if (Remaps.count(MO.getReg()))
1675         MO.setReg(Remaps[MO.getReg()]);
1676       else {
1677         // If we are using a phi from the source block we need to add a new phi
1678         // pointing to the old one.
1679         MachineInstr *Use = MRI.getUniqueVRegDef(MO.getReg());
1680         if (Use && Use->isPHI() && Use->getParent() == SourceBB) {
1681           Register R = clonePhi(Use);
1682           MO.setReg(R);
1683         }
1684       }
1685     }
1686   }
1687 }
1688 
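/// Starting from CanonicalPhi, follow the loop-carried operand through as many
/// kernel PHIs as the iteration distance recorded for Phi and return the
/// register reached; this is the kernel value the peeled phi actually refers
/// to.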
1689 Register
1690 PeelingModuloScheduleExpander::getPhiCanonicalReg(MachineInstr *CanonicalPhi,
1691                                                   MachineInstr *Phi) {
1692   unsigned distance = PhiNodeLoopIteration[Phi];
1693   MachineInstr *CanonicalUse = CanonicalPhi;
1694   Register CanonicalUseReg = CanonicalUse->getOperand(0).getReg();
1695   for (unsigned I = 0; I < distance; ++I) {
1696     assert(CanonicalUse->isPHI());
1697     assert(CanonicalUse->getNumOperands() == 5);
1698     unsigned LoopRegIdx = 3, InitRegIdx = 1;
1699     if (CanonicalUse->getOperand(2).getMBB() == CanonicalUse->getParent())
1700       std::swap(LoopRegIdx, InitRegIdx);
1701     CanonicalUseReg = CanonicalUse->getOperand(LoopRegIdx).getReg();
1702     CanonicalUse = MRI.getVRegDef(CanonicalUseReg);
1703   }
1704   return CanonicalUseReg;
1705 }
1706 
1707 void PeelingModuloScheduleExpander::peelPrologAndEpilogs() {
1708   BitVector LS(Schedule.getNumStages(), true);
1709   BitVector AS(Schedule.getNumStages(), true);
1710   LiveStages[BB] = LS;
1711   AvailableStages[BB] = AS;
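  // A rough intuition for these two maps (the header has the authoritative
  // description): LiveStages records which stages still have instructions in a
  // block, while AvailableStages records which stages have already produced
  // their values by the time control reaches the block. The kernel contains,
  // and has available, every stage.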
1712 
1713   // Peel out the prologs.
1714   LS.reset();
1715   for (int I = 0; I < Schedule.getNumStages() - 1; ++I) {
1716     LS[I] = 1;
1717     Prologs.push_back(peelKernel(LPD_Front));
1718     LiveStages[Prologs.back()] = LS;
1719     AvailableStages[Prologs.back()] = LS;
1720   }
1721 
1722   // Create a block that will end up as the new loop exiting block (dominated by
1723   // all prologs and epilogs). It will only contain PHIs, in the same order as
1724   // BB's PHIs. This gives us a poor-man's LCSSA with the inductive property
1725   // that the exiting block is a (sub) clone of BB. This in turn gives us the
1726   // property that any value defined in BB but used outside of BB is used by a
1727   // PHI in the exiting block.
1728   MachineBasicBlock *ExitingBB = CreateLCSSAExitingBlock();
1729   EliminateDeadPhis(ExitingBB, MRI, LIS, /*KeepSingleSrcPhi=*/true);
1730   // Push out the epilogs, again in reverse order.
1731   // We can't assume anything about the minimum loop trip count at this point,
1732   // so emit a fairly complex epilog.
1733 
1734   // We first peel off (number of stages - 1) epilogues. Then we remove dead
1735   // stages and reorder instructions based on their stage. If we have 3 stages
1736   // we generate first:
1737   // E0[3, 2, 1]
1738   // E1[3', 2']
1739   // E2[3'']
1740   // And then we move instructions based on their stages to have:
1741   // E0[3]
1742   // E1[2, 3']
1743   // E2[1, 2', 3'']
1744   // The transformation is legal because we only move instructions past
1745   // instructions of a previous loop iteration.
1746   for (int I = 1; I <= Schedule.getNumStages() - 1; ++I) {
1747     Epilogs.push_back(peelKernel(LPD_Back));
1748     MachineBasicBlock *B = Epilogs.back();
1749     filterInstructions(B, Schedule.getNumStages() - I);
1750     // Keep track of which iteration each phi belongs to. We need it to know
1751     // what version of the variable to use during prologue/epilogue stitching.
1752     EliminateDeadPhis(B, MRI, LIS, /*KeepSingleSrcPhi=*/true);
1753     for (MachineInstr &Phi : B->phis())
1754       PhiNodeLoopIteration[&Phi] = Schedule.getNumStages() - I;
1755   }
1756   for (size_t I = 0; I < Epilogs.size(); I++) {
1757     LS.reset();
1758     for (size_t J = I; J < Epilogs.size(); J++) {
1759       int Iteration = J;
1760       unsigned Stage = Schedule.getNumStages() - 1 + I - J;
1761       // Move stage one block at a time so that Phi nodes are updated correctly.
1762       for (size_t K = Iteration; K > I; K--)
1763         moveStageBetweenBlocks(Epilogs[K - 1], Epilogs[K], Stage);
1764       LS[Stage] = 1;
1765     }
1766     LiveStages[Epilogs[I]] = LS;
1767     AvailableStages[Epilogs[I]] = AS;
1768   }
1769 
1770   // Now we've defined all the prolog and epilog blocks as a fallthrough
1771   // sequence, add the edges that will be followed if the loop trip count is
1772   // lower than the number of stages (connecting prologs directly with epilogs).
1773   auto PI = Prologs.begin();
1774   auto EI = Epilogs.begin();
1775   assert(Prologs.size() == Epilogs.size());
1776   for (; PI != Prologs.end(); ++PI, ++EI) {
1777     MachineBasicBlock *Pred = *(*EI)->pred_begin();
1778     (*PI)->addSuccessor(*EI);
1779     for (MachineInstr &MI : (*EI)->phis()) {
1780       Register Reg = MI.getOperand(1).getReg();
1781       MachineInstr *Use = MRI.getUniqueVRegDef(Reg);
1782       if (Use && Use->getParent() == Pred) {
1783         MachineInstr *CanonicalUse = CanonicalMIs[Use];
1784         if (CanonicalUse->isPHI()) {
1785           // If the use comes from a phi we need to skip as many phis as the
1786           // distance between the epilogue and the kernel. Trace through the phi
1787           // chain to find the right value.
1788           Reg = getPhiCanonicalReg(CanonicalUse, Use);
1789         }
1790         Reg = getEquivalentRegisterIn(Reg, *PI);
1791       }
1792       MI.addOperand(MachineOperand::CreateReg(Reg, /*isDef=*/false));
1793       MI.addOperand(MachineOperand::CreateMBB(*PI));
1794     }
1795   }
1796 
1797   // Create a list of all blocks in order.
1798   SmallVector<MachineBasicBlock *, 8> Blocks;
1799   llvm::copy(PeeledFront, std::back_inserter(Blocks));
1800   Blocks.push_back(BB);
1801   llvm::copy(PeeledBack, std::back_inserter(Blocks));
1802 
1803   // Iterate in reverse order over all instructions, remapping as we go.
1804   for (MachineBasicBlock *B : reverse(Blocks)) {
1805     for (auto I = B->getFirstInstrTerminator()->getReverseIterator();
1806          I != std::next(B->getFirstNonPHI()->getReverseIterator());) {
1807       MachineInstr *MI = &*I++;
1808       rewriteUsesOf(MI);
1809     }
1810   }
1811   for (auto *MI : IllegalPhisToDelete) {
1812     if (LIS)
1813       LIS->RemoveMachineInstrFromMaps(*MI);
1814     MI->eraseFromParent();
1815   }
1816   IllegalPhisToDelete.clear();
1817 
1818   // Now all remapping has been done, we're free to optimize the generated code.
1819   for (MachineBasicBlock *B : reverse(Blocks))
1820     EliminateDeadPhis(B, MRI, LIS);
1821   EliminateDeadPhis(ExitingBB, MRI, LIS);
1822 }
1823 
1824 MachineBasicBlock *PeelingModuloScheduleExpander::CreateLCSSAExitingBlock() {
1825   MachineFunction &MF = *BB->getParent();
1826   MachineBasicBlock *Exit = *BB->succ_begin();
1827   if (Exit == BB)
1828     Exit = *std::next(BB->succ_begin());
1829 
1830   MachineBasicBlock *NewBB = MF.CreateMachineBasicBlock(BB->getBasicBlock());
1831   MF.insert(std::next(BB->getIterator()), NewBB);
1832 
1833   // Clone all phis in BB into NewBB and rewrite.
1834   for (MachineInstr &MI : BB->phis()) {
1835     auto RC = MRI.getRegClass(MI.getOperand(0).getReg());
1836     Register OldR = MI.getOperand(3).getReg();
1837     Register R = MRI.createVirtualRegister(RC);
1838     SmallVector<MachineInstr *, 4> Uses;
1839     for (MachineInstr &Use : MRI.use_instructions(OldR))
1840       if (Use.getParent() != BB)
1841         Uses.push_back(&Use);
1842     for (MachineInstr *Use : Uses)
1843       Use->substituteRegister(OldR, R, /*SubIdx=*/0,
1844                               *MRI.getTargetRegisterInfo());
1845     MachineInstr *NI = BuildMI(NewBB, DebugLoc(), TII->get(TargetOpcode::PHI), R)
1846         .addReg(OldR)
1847         .addMBB(BB);
1848     BlockMIs[{NewBB, &MI}] = NI;
1849     CanonicalMIs[NI] = &MI;
1850   }
1851   BB->replaceSuccessor(Exit, NewBB);
1852   Exit->replacePhiUsesWith(BB, NewBB);
1853   NewBB->addSuccessor(Exit);
1854 
1855   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
1856   SmallVector<MachineOperand, 4> Cond;
1857   bool CanAnalyzeBr = !TII->analyzeBranch(*BB, TBB, FBB, Cond);
1858   (void)CanAnalyzeBr;
1859   assert(CanAnalyzeBr && "Must be able to analyze the loop branch!");
1860   TII->removeBranch(*BB);
1861   TII->insertBranch(*BB, TBB == Exit ? NewBB : TBB, FBB == Exit ? NewBB : FBB,
1862                     Cond, DebugLoc());
1863   TII->insertUnconditionalBranch(*NewBB, Exit, DebugLoc());
1864   return NewBB;
1865 }
1866 
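/// Return the register that plays the role of Reg in basic block BB: find the
/// canonical kernel instruction defining Reg and return the register defined
/// by the corresponding operand of that instruction's copy in BB.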
1867 Register
1868 PeelingModuloScheduleExpander::getEquivalentRegisterIn(Register Reg,
1869                                                        MachineBasicBlock *BB) {
1870   MachineInstr *MI = MRI.getUniqueVRegDef(Reg);
1871   unsigned OpIdx = MI->findRegisterDefOperandIdx(Reg);
1872   return BlockMIs[{BB, CanonicalMIs[MI]}]->getOperand(OpIdx).getReg();
1873 }
1874 
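/// Rewrite or erase MI now that peeling has finished. Illegal PHIs are
/// replaced by their loop-carried operand (or by their initial operand when
/// that stage's value is not available in this block) and queued for deletion;
/// ordinary instructions whose stage is not live in their block are erased
/// after their PHI users have been redirected.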
1875 void PeelingModuloScheduleExpander::rewriteUsesOf(MachineInstr *MI) {
1876   if (MI->isPHI()) {
1877     // This is an illegal PHI. The loop-carried (desired) value is operand 3,
1878     // and it is produced by this block.
1879     Register PhiR = MI->getOperand(0).getReg();
1880     Register R = MI->getOperand(3).getReg();
1881     int RMIStage = getStage(MRI.getUniqueVRegDef(R));
1882     if (RMIStage != -1 && !AvailableStages[MI->getParent()].test(RMIStage))
1883       R = MI->getOperand(1).getReg();
1884     MRI.setRegClass(R, MRI.getRegClass(PhiR));
1885     MRI.replaceRegWith(PhiR, R);
1886     // Postpone deleting the Phi as it may be referenced by BlockMIs and used
1887     // later to figure out how to remap registers.
1888     MI->getOperand(0).setReg(PhiR);
1889     IllegalPhisToDelete.push_back(MI);
1890     return;
1891   }
1892 
1893   int Stage = getStage(MI);
1894   if (Stage == -1 || LiveStages.count(MI->getParent()) == 0 ||
1895       LiveStages[MI->getParent()].test(Stage))
1896     // Instruction is live, no rewriting to do.
1897     return;
1898 
1899   for (MachineOperand &DefMO : MI->defs()) {
1900     SmallVector<std::pair<MachineInstr *, Register>, 4> Subs;
1901     for (MachineInstr &UseMI : MRI.use_instructions(DefMO.getReg())) {
1902       // Only PHIs can use values from this block by construction.
1903       // Match with the equivalent PHI in B.
1904       assert(UseMI.isPHI());
1905       Register Reg = getEquivalentRegisterIn(UseMI.getOperand(0).getReg(),
1906                                              MI->getParent());
1907       Subs.emplace_back(&UseMI, Reg);
1908     }
1909     for (auto &Sub : Subs)
1910       Sub.first->substituteRegister(DefMO.getReg(), Sub.second, /*SubIdx=*/0,
1911                                     *MRI.getTargetRegisterInfo());
1912   }
1913   if (LIS)
1914     LIS->RemoveMachineInstrFromMaps(*MI);
1915   MI->eraseFromParent();
1916 }
1917 
1918 void PeelingModuloScheduleExpander::fixupBranches() {
1919   // Work outwards from the kernel.
1920   bool KernelDisposed = false;
1921   int TC = Schedule.getNumStages() - 1;
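  // TC is the trip-count threshold tested at each prolog: conceptually the
  // inserted branch asks "does the loop run more than TC iterations?", and
  // depending on the answer control either continues towards the kernel or
  // leaves early for the matching epilog.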
1922   for (auto PI = Prologs.rbegin(), EI = Epilogs.rbegin(); PI != Prologs.rend();
1923        ++PI, ++EI, --TC) {
1924     MachineBasicBlock *Prolog = *PI;
1925     MachineBasicBlock *Fallthrough = *Prolog->succ_begin();
1926     MachineBasicBlock *Epilog = *EI;
1927     SmallVector<MachineOperand, 4> Cond;
1928     TII->removeBranch(*Prolog);
1929     Optional<bool> StaticallyGreater =
1930         LoopInfo->createTripCountGreaterCondition(TC, *Prolog, Cond);
1931     if (!StaticallyGreater.hasValue()) {
1932       LLVM_DEBUG(dbgs() << "Dynamic: TC > " << TC << "\n");
1933       // Dynamically branch based on Cond.
1934       TII->insertBranch(*Prolog, Epilog, Fallthrough, Cond, DebugLoc());
1935     } else if (*StaticallyGreater == false) {
1936       LLVM_DEBUG(dbgs() << "Static-false: TC > " << TC << "\n");
1937       // Prolog never falls through; branch to epilog and orphan interior
1938       // blocks. Leave it to unreachable-block-elim to clean up.
1939       Prolog->removeSuccessor(Fallthrough);
1940       for (MachineInstr &P : Fallthrough->phis()) {
1941         P.RemoveOperand(2);
1942         P.RemoveOperand(1);
1943       }
1944       TII->insertUnconditionalBranch(*Prolog, Epilog, DebugLoc());
1945       KernelDisposed = true;
1946     } else {
1947       LLVM_DEBUG(dbgs() << "Static-true: TC > " << TC << "\n");
1948       // Prolog always falls through; remove incoming values in epilog.
1949       Prolog->removeSuccessor(Epilog);
1950       for (MachineInstr &P : Epilog->phis()) {
1951         P.RemoveOperand(4);
1952         P.RemoveOperand(3);
1953       }
1954     }
1955   }
1956 
1957   if (!KernelDisposed) {
1958     LoopInfo->adjustTripCount(-(Schedule.getNumStages() - 1));
1959     LoopInfo->setPreheader(Prologs.back());
1960   } else {
1961     LoopInfo->disposed();
1962   }
1963 }
1964 
1965 void PeelingModuloScheduleExpander::rewriteKernel() {
1966   KernelRewriter KR(*Schedule.getLoop(), Schedule, BB);
1967   KR.rewrite();
1968 }
1969 
1970 void PeelingModuloScheduleExpander::expand() {
1971   BB = Schedule.getLoop()->getTopBlock();
1972   Preheader = Schedule.getLoop()->getLoopPreheader();
1973   LLVM_DEBUG(Schedule.dump());
1974   LoopInfo = TII->analyzeLoopForPipelining(BB);
1975   assert(LoopInfo);
1976 
1977   rewriteKernel();
1978   peelPrologAndEpilogs();
1979   fixupBranches();
1980 }
1981 
1982 void PeelingModuloScheduleExpander::validateAgainstModuloScheduleExpander() {
1983   BB = Schedule.getLoop()->getTopBlock();
1984   Preheader = Schedule.getLoop()->getLoopPreheader();
1985 
1986   // Dump the schedule before we invalidate and remap all its instructions.
1987   // Stash it in a string so we can print it if we found an error.
1988   std::string ScheduleDump;
1989   raw_string_ostream OS(ScheduleDump);
1990   Schedule.print(OS);
1991   OS.flush();
1992 
1993   // First, run the normal ModuloScheduleExpander. We don't support any
1994   // InstrChanges.
1995   assert(LIS && "Requires LiveIntervals!");
1996   ModuloScheduleExpander MSE(MF, Schedule, *LIS,
1997                              ModuloScheduleExpander::InstrChangesTy());
1998   MSE.expand();
1999   MachineBasicBlock *ExpandedKernel = MSE.getRewrittenKernel();
2000   if (!ExpandedKernel) {
2001     // The expander optimized away the kernel. We can't do any useful checking.
2002     MSE.cleanup();
2003     return;
2004   }
2005   // Before running the KernelRewriter, re-add BB into the CFG.
2006   Preheader->addSuccessor(BB);
2007 
2008   // Now run the new expansion algorithm.
2009   KernelRewriter KR(*Schedule.getLoop(), Schedule, BB);
2010   KR.rewrite();
2011   peelPrologAndEpilogs();
2012 
2013   // Collect all illegal phis that the new algorithm created. We'll give these
2014   // to KernelOperandInfo.
2015   SmallPtrSet<MachineInstr *, 4> IllegalPhis;
2016   for (auto NI = BB->getFirstNonPHI(); NI != BB->end(); ++NI) {
2017     if (NI->isPHI())
2018       IllegalPhis.insert(&*NI);
2019   }
2020 
2021   // Co-iterate across both kernels. We expect them to be identical apart from
2022   // phis and full COPYs (we look through both).
2023   SmallVector<std::pair<KernelOperandInfo, KernelOperandInfo>, 8> KOIs;
2024   auto OI = ExpandedKernel->begin();
2025   auto NI = BB->begin();
2026   for (; !OI->isTerminator() && !NI->isTerminator(); ++OI, ++NI) {
2027     while (OI->isPHI() || OI->isFullCopy())
2028       ++OI;
2029     while (NI->isPHI() || NI->isFullCopy())
2030       ++NI;
2031     assert(OI->getOpcode() == NI->getOpcode() && "Opcodes don't match?!");
2032     // Analyze every operand separately.
2033     for (auto OOpI = OI->operands_begin(), NOpI = NI->operands_begin();
2034          OOpI != OI->operands_end(); ++OOpI, ++NOpI)
2035       KOIs.emplace_back(KernelOperandInfo(&*OOpI, MRI, IllegalPhis),
2036                         KernelOperandInfo(&*NOpI, MRI, IllegalPhis));
2037   }
2038 
2039   bool Failed = false;
2040   for (auto &OldAndNew : KOIs) {
2041     if (OldAndNew.first == OldAndNew.second)
2042       continue;
2043     Failed = true;
2044     errs() << "Modulo kernel validation error: [\n";
2045     errs() << " [golden] ";
2046     OldAndNew.first.print(errs());
2047     errs() << "          ";
2048     OldAndNew.second.print(errs());
2049     errs() << "]\n";
2050   }
2051 
2052   if (Failed) {
2053     errs() << "Golden reference kernel:\n";
2054     ExpandedKernel->print(errs());
2055     errs() << "New kernel:\n";
2056     BB->print(errs());
2057     errs() << ScheduleDump;
2058     report_fatal_error(
2059         "Modulo kernel validation (-pipeliner-experimental-cg) failed");
2060   }
2061 
2062   // Cleanup by removing BB from the CFG again as the original
2063   // ModuloScheduleExpander intended.
2064   Preheader->removeSuccessor(BB);
2065   MSE.cleanup();
2066 }
2067 
2068 //===----------------------------------------------------------------------===//
2069 // ModuloScheduleTestPass implementation
2070 //===----------------------------------------------------------------------===//
2071 // This pass constructs a ModuloSchedule from the machine function it runs on
2072 // and expands it using ModuloScheduleExpander.
2073 //
2074 // The function is expected to contain a single-block analyzable loop.
2075 // The total order of instructions is taken from the loop as-is.
2076 // Instructions are expected to be annotated with a PostInstrSymbol.
2077 // This PostInstrSymbol must have the following format:
2078 //  "Stage=%d Cycle=%d".
2079 //===----------------------------------------------------------------------===//
2080 
2081 namespace {
2082 class ModuloScheduleTest : public MachineFunctionPass {
2083 public:
2084   static char ID;
2085 
2086   ModuloScheduleTest() : MachineFunctionPass(ID) {
2087     initializeModuloScheduleTestPass(*PassRegistry::getPassRegistry());
2088   }
2089 
2090   bool runOnMachineFunction(MachineFunction &MF) override;
2091   void runOnLoop(MachineFunction &MF, MachineLoop &L);
2092 
2093   void getAnalysisUsage(AnalysisUsage &AU) const override {
2094     AU.addRequired<MachineLoopInfo>();
2095     AU.addRequired<LiveIntervals>();
2096     MachineFunctionPass::getAnalysisUsage(AU);
2097   }
2098 };
2099 } // namespace
2100 
2101 char ModuloScheduleTest::ID = 0;
2102 
2103 INITIALIZE_PASS_BEGIN(ModuloScheduleTest, "modulo-schedule-test",
2104                       "Modulo Schedule test pass", false, false)
2105 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
2106 INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
2107 INITIALIZE_PASS_END(ModuloScheduleTest, "modulo-schedule-test",
2108                     "Modulo Schedule test pass", false, false)
2109 
2110 bool ModuloScheduleTest::runOnMachineFunction(MachineFunction &MF) {
2111   MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
2112   for (auto *L : MLI) {
2113     if (L->getTopBlock() != L->getBottomBlock())
2114       continue;
2115     runOnLoop(MF, *L);
2116     return false;
2117   }
2118   return false;
2119 }
2120 
2121 static void parseSymbolString(StringRef S, int &Cycle, int &Stage) {
2122   std::pair<StringRef, StringRef> StageAndCycle = getToken(S, "_");
2123   std::pair<StringRef, StringRef> StageTokenAndValue =
2124       getToken(StageAndCycle.first, "-");
2125   std::pair<StringRef, StringRef> CycleTokenAndValue =
2126       getToken(StageAndCycle.second, "-");
2127   if (StageTokenAndValue.first != "Stage" ||
2128       CycleTokenAndValue.first != "_Cycle") {
2129     llvm_unreachable(
2130         "Bad post-instr symbol syntax: see comment in ModuloScheduleTest");
2131     return;
2132   }
2133 
2134   StageTokenAndValue.second.drop_front().getAsInteger(10, Stage);
2135   CycleTokenAndValue.second.drop_front().getAsInteger(10, Cycle);
2136 
2137   dbgs() << "  Stage=" << Stage << ", Cycle=" << Cycle << "\n";
2138 }
2139 
2140 void ModuloScheduleTest::runOnLoop(MachineFunction &MF, MachineLoop &L) {
2141   LiveIntervals &LIS = getAnalysis<LiveIntervals>();
2142   MachineBasicBlock *BB = L.getTopBlock();
2143   dbgs() << "--- ModuloScheduleTest running on BB#" << BB->getNumber() << "\n";
2144 
2145   DenseMap<MachineInstr *, int> Cycle, Stage;
2146   std::vector<MachineInstr *> Instrs;
2147   for (MachineInstr &MI : *BB) {
2148     if (MI.isTerminator())
2149       continue;
2150     Instrs.push_back(&MI);
2151     if (MCSymbol *Sym = MI.getPostInstrSymbol()) {
2152       dbgs() << "Parsing post-instr symbol for " << MI;
2153       parseSymbolString(Sym->getName(), Cycle[&MI], Stage[&MI]);
2154     }
2155   }
2156 
2157   ModuloSchedule MS(MF, &L, std::move(Instrs), std::move(Cycle),
2158                     std::move(Stage));
2159   ModuloScheduleExpander MSE(
2160       MF, MS, LIS, /*InstrChanges=*/ModuloScheduleExpander::InstrChangesTy());
2161   MSE.expand();
2162   MSE.cleanup();
2163 }
2164 
2165 //===----------------------------------------------------------------------===//
2166 // ModuloScheduleTestAnnotater implementation
2167 //===----------------------------------------------------------------------===//
2168 
2169 void ModuloScheduleTestAnnotater::annotate() {
2170   for (MachineInstr *MI : S.getInstructions()) {
2171     SmallVector<char, 16> SV;
2172     raw_svector_ostream OS(SV);
2173     OS << "Stage-" << S.getStage(MI) << "_Cycle-" << S.getCycle(MI);
2174     MCSymbol *Sym = MF.getContext().getOrCreateSymbol(OS.str());
2175     MI->setPostInstrSymbol(MF, Sym);
2176   }
2177 }
2178