//===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack.  Each Scalar ALU controls the operations of 64 Vector
/// ALUs.  The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single Vector ALU).  Typically, for predicates, a Vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one for each
/// Vector ALU) and then the Scalar ALU will AND the VCC register with
/// EXEC to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
///   %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
///   %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC  // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %EXEC   // Restore the exec mask for the Then block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC    // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1             // Use our branch optimization
///                                    // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0  // Do the ELSE block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0     // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define DEBUG_TYPE "si-lower-control-flow"

namespace {

class SILowerControlFlow : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;
  LiveIntervals *LIS;
  MachineRegisterInfo *MRI;

  void emitIf(MachineInstr &MI);
  void emitElse(MachineInstr &MI);
  void emitBreak(MachineInstr &MI);
  void emitIfBreak(MachineInstr &MI);
  void emitElseBreak(MachineInstr &MI);
  void emitLoop(MachineInstr &MI);
  void emitEndCf(MachineInstr &MI);

public:
  static char ID;

  SILowerControlFlow() :
    MachineFunctionPass(ID),
    TRI(nullptr),
    TII(nullptr),
    LIS(nullptr),
    MRI(nullptr) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI Lower control flow pseudo instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Should preserve the same set that TwoAddressInstructions does.
    AU.addPreserved<SlotIndexes>();
    AU.addPreserved<LiveIntervals>();
    AU.addPreservedID(LiveVariablesID);
    AU.addPreservedID(MachineLoopInfoID);
    AU.addPreservedID(MachineDominatorsID);
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace

char SILowerControlFlow::ID = 0;

INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
               "SI lower control flow", false, false)

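// Set the dead flag on the implicit SCC def (operand 3) of a newly built
// SALU instruction.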
static void setImpSCCDefDead(MachineInstr &MI, bool IsDead) {
  MachineOperand &ImpDefSCC = MI.getOperand(3);
  assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());

  ImpDefSCC.setIsDead(IsDead);
}

char &llvm::SILowerControlFlowID = SILowerControlFlow::ID;

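// Lower SI_IF: copy the current exec mask, AND it with the condition to form
// the mask for the 'then' block, record the remaining lanes in the saved
// register for SI_ELSE/SI_END_CF, move the new mask into EXEC, and emit a
// SI_MASK_BRANCH terminator that can later become a skip over the block.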
void SILowerControlFlow::emitIf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  MachineOperand &SaveExec = MI.getOperand(0);
  MachineOperand &Cond = MI.getOperand(1);
  assert(SaveExec.getSubReg() == AMDGPU::NoSubRegister &&
         Cond.getSubReg() == AMDGPU::NoSubRegister);

  unsigned SaveExecReg = SaveExec.getReg();

  MachineOperand &ImpDefSCC = MI.getOperand(4);
  assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());

  // Add an implicit def of exec to discourage scheduling VALU after this which
  // will interfere with trying to form s_and_saveexec_b64 later.
  MachineInstr *CopyExec =
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), SaveExecReg)
    .addReg(AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC, RegState::ImplicitDefine);

  unsigned Tmp = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);

  MachineInstr *And =
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_AND_B64), Tmp)
    .addReg(SaveExecReg)
    //.addReg(AMDGPU::EXEC)
    .addReg(Cond.getReg());
  setImpSCCDefDead(*And, true);

  MachineInstr *Xor =
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_XOR_B64), SaveExecReg)
    .addReg(Tmp)
    .addReg(SaveExecReg);
  setImpSCCDefDead(*Xor, ImpDefSCC.isDead());

  // Use a copy that is a terminator to get correct spill code placement with
  // fast regalloc.
  MachineInstr *SetExec =
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64_term), AMDGPU::EXEC)
    .addReg(Tmp, RegState::Kill);

  // Insert a pseudo terminator to help keep the verifier happy. This will also
  // be used later when inserting skips.
  MachineInstr *NewBr =
    BuildMI(MBB, I, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
    .addOperand(MI.getOperand(2));

  if (!LIS) {
    MI.eraseFromParent();
    return;
  }

  LIS->InsertMachineInstrInMaps(*CopyExec);

  // Replace the pseudo with the AND so we don't need to fix the live interval
  // for the condition register.
  LIS->ReplaceMachineInstrInMaps(MI, *And);

  LIS->InsertMachineInstrInMaps(*Xor);
  LIS->InsertMachineInstrInMaps(*SetExec);
  LIS->InsertMachineInstrInMaps(*NewBr);

  LIS->removeRegUnit(*MCRegUnitIterator(AMDGPU::EXEC, TRI));
  MI.eraseFromParent();

  // FIXME: Is there a better way of adjusting the liveness? It shouldn't be
  // hard to add another def here but I'm not sure how to correctly update the
  // valno.
  LIS->removeInterval(SaveExecReg);
  LIS->createAndComputeVirtRegInterval(SaveExecReg);
  LIS->createAndComputeVirtRegInterval(Tmp);
}

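// Lower SI_ELSE: starting from the mask saved by SI_IF, switch EXEC over to
// the lanes that should execute the else block and insert the mask branch
// that allows skipping it when no lanes remain.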
void SILowerControlFlow::emitElse(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  unsigned DstReg = MI.getOperand(0).getReg();
  assert(MI.getOperand(0).getSubReg() == AMDGPU::NoSubRegister);

  bool ExecModified = MI.getOperand(3).getImm() != 0;
  MachineBasicBlock::iterator Start = MBB.begin();

  // We are running before TwoAddressInstructions, and si_else's operands are
  // tied. In order to correctly tie the registers, split this into a copy of
  // the src, as TwoAddressInstructions would do.
  BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), DstReg)
    .addOperand(MI.getOperand(1)); // Saved EXEC

  // This must be inserted before phis and any spill code inserted before the
  // else.
  MachineInstr *OrSaveExec =
    BuildMI(MBB, Start, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64), DstReg)
    .addReg(DstReg);

  MachineBasicBlock *DestBB = MI.getOperand(2).getMBB();

  MachineBasicBlock::iterator ElsePt(MI);

  if (ExecModified) {
    MachineInstr *And =
      BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_AND_B64), DstReg)
      .addReg(AMDGPU::EXEC)
      .addReg(DstReg);

    if (LIS)
      LIS->InsertMachineInstrInMaps(*And);
  }

  MachineInstr *Xor =
    BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(DstReg);

  MachineInstr *Branch =
    BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
    .addMBB(DestBB);

  if (!LIS) {
    MI.eraseFromParent();
    return;
  }

  LIS->RemoveMachineInstrFromMaps(MI);
  MI.eraseFromParent();

  LIS->InsertMachineInstrInMaps(*OrSaveExec);

  LIS->InsertMachineInstrInMaps(*Xor);
  LIS->InsertMachineInstrInMaps(*Branch);

  // src reg is tied to dst reg.
  LIS->removeInterval(DstReg);
  LIS->createAndComputeVirtRegInterval(DstReg);

  // Let this be recomputed.
  LIS->removeRegUnit(*MCRegUnitIterator(AMDGPU::EXEC, TRI));
}

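// Lower SI_BREAK: OR the currently active lanes (EXEC) into the accumulated
// break mask so they are removed from the loop at the backedge.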
void SILowerControlFlow::emitBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();

  MachineInstr *Or =
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(AMDGPU::EXEC)
    .addOperand(MI.getOperand(1));

  if (LIS)
    LIS->ReplaceMachineInstrInMaps(MI, *Or);
  MI.eraseFromParent();
}

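// SI_IF_BREAK already carries the operands of an S_OR_B64 (the condition
// OR'd into the break mask), so lowering only needs to rewrite the opcode.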
void SILowerControlFlow::emitIfBreak(MachineInstr &MI) {
  MI.setDesc(TII->get(AMDGPU::S_OR_B64));
}

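// SI_ELSE_BREAK is likewise lowered in place by rewriting it to S_OR_B64.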
void SILowerControlFlow::emitElseBreak(MachineInstr &MI) {
  MI.setDesc(TII->get(AMDGPU::S_OR_B64));
}

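// Lower SI_LOOP: clear the accumulated break lanes from EXEC and branch back
// to the loop header while any lanes are still executing.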
void SILowerControlFlow::emitLoop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  MachineInstr *AndN2 =
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64_term), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addOperand(MI.getOperand(0));

  MachineInstr *Branch =
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addOperand(MI.getOperand(1));

  if (LIS) {
    LIS->ReplaceMachineInstrInMaps(MI, *AndN2);
    LIS->InsertMachineInstrInMaps(*Branch);
  }

  MI.eraseFromParent();
}

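// Lower SI_END_CF: re-enable the lanes saved when the region was entered by
// ORing them back into EXEC. The OR is placed at the top of the block so it
// takes effect before any other code in the join block.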
void SILowerControlFlow::emitEndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  MachineBasicBlock::iterator InsPt = MBB.begin();
  MachineInstr *NewMI =
    BuildMI(MBB, InsPt, DL, TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addOperand(MI.getOperand(0));

  if (LIS)
    LIS->ReplaceMachineInstrInMaps(MI, *NewMI);

  MI.eraseFromParent();

  if (LIS)
    LIS->handleMove(*NewMI);
}

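// Lower every control-flow pseudo in the function in place. LiveIntervals are
// kept up to date when available, but the pass does not require them.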
bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  // This doesn't actually need LiveIntervals, but we can preserve them.
  LIS = getAnalysisIfAvailable<LiveIntervals>();
  MRI = &MF.getRegInfo();

  MachineFunction::iterator NextBB;
  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; BI = NextBB) {
    NextBB = std::next(BI);
    MachineBasicBlock &MBB = *BI;

    MachineBasicBlock::iterator I, Next;

    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      case AMDGPU::SI_IF:
        emitIf(MI);
        break;

      case AMDGPU::SI_ELSE:
        emitElse(MI);
        break;

      case AMDGPU::SI_BREAK:
        emitBreak(MI);
        break;

      case AMDGPU::SI_IF_BREAK:
        emitIfBreak(MI);
        break;

      case AMDGPU::SI_ELSE_BREAK:
        emitElseBreak(MI);
        break;

      case AMDGPU::SI_LOOP:
        emitLoop(MI);
        break;

      case AMDGPU::SI_END_CF:
        emitEndCf(MI);
        break;

      default:
        break;
      }
    }
  }

  return true;
}