//===----------------------- SIFrameLowering.cpp --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"

#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"

using namespace llvm;

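// All 128-bit SGPR tuples that could hold the scratch resource descriptor:
// each tuple spans four 32-bit SGPRs, so the subtarget's SGPR budget is
// divided by 4.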
static ArrayRef<MCPhysReg> getAllSGPR128(const SISubtarget &ST,
                                         const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
                      ST.getMaxNumSGPRs(MF) / 4);
}

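// All individual 32-bit SGPRs available to this function.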
static ArrayRef<MCPhysReg> getAllSGPRs(const SISubtarget &ST,
                                       const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(),
                      ST.getMaxNumSGPRs(MF));
}

void SIFrameLowering::emitFlatScratchInit(const SISubtarget &ST,
                                          MachineFunction &MF,
                                          MachineBasicBlock &MBB) const {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // We don't need this if we only have spills, since there is no user-facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect whether flat instructions are used at
  // all, this will be used more often than necessary on VI.

  // The debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  unsigned FlatScratchInitReg
    = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);

  MachineRegisterInfo &MRI = MF.getRegInfo();
  MRI.addLiveIn(FlatScratchInitReg);
  MBB.addLiveIn(FlatScratchInitReg);

  unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
  unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
      .addReg(FlatScrInitHi)
      .addImm(0);

    return;
  }

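  // On targets where flat_scratch is not a pointer, FLAT_SCR_LO receives the
  // scratch size and FLAT_SCR_HI the wave's scratch base in 256-byte units.
  // Illustrative example (hypothetical values): with a private segment base
  // of 0x1234500 and a wave offset of 0x300, FLAT_SCR_HI ends up holding
  // (0x1234500 + 0x300) >> 8 = 0x12348.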
  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
    .addReg(FlatScrInitHi, RegState::Kill);

  // Add the wave offset in bytes to the private base offset.
  // See the comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
    .addReg(FlatScrInitLo)
    .addReg(ScratchWaveOffsetReg);

  // Convert the offset to 256-byte units.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
    .addReg(FlatScrInitLo, RegState::Kill)
    .addImm(8);
}

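// Pick the SGPR128 register that will hold the scratch resource descriptor,
// shifting the previously reserved register down past the preloaded user
// SGPRs when possible. Returns AMDGPU::NoRegister if scratch is unused.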
unsigned SIFrameLowering::getReservedPrivateSegmentBufferReg(
  const SISubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // We need to insert initialization of the scratch resource descriptor.
  unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
  if (ScratchRsrcReg == AMDGPU::NoRegister ||
      !MRI.isPhysRegUsed(ScratchRsrcReg))
    return AMDGPU::NoRegister;

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift the register down to the
  // end of those that were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. We only
  // cannot do this for the resources required for scratch access. For now we
  // skip over user SGPRs and may leave unused holes.

  // We find the resource first because it has an alignment requirement.

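  // Round the preloaded SGPR count up to a whole number of 128-bit tuples,
  // then skip past those tuples so the descriptor does not overlap the user
  // SGPRs.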
  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = getAllSGPR128(ST, MF);
  AllSGPR128s = AllSGPR128s.slice(
      std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

// Shift down the registers reserved for the scratch wave offset and stack
// pointer SGPRs.
std::pair<unsigned, unsigned>
SIFrameLowering::getReservedPrivateSegmentWaveByteOffsetReg(
  const SISubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // No replacement necessary.
  if (ScratchWaveOffsetReg == AMDGPU::NoRegister ||
      !MRI.isPhysRegUsed(ScratchWaveOffsetReg)) {
    assert(MFI->getStackPtrOffsetReg() == AMDGPU::SP_REG);
    return std::make_pair(AMDGPU::NoRegister, AMDGPU::NoRegister);
  }

  unsigned SPReg = MFI->getStackPtrOffsetReg();
  if (ST.hasSGPRInitBug())
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();

  ArrayRef<MCPhysReg> AllSGPRs = getAllSGPRs(ST, MF);
  if (NumPreloaded > AllSGPRs.size())
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  AllSGPRs = AllSGPRs.slice(NumPreloaded);

  // We need to drop registers from the end of the list that we cannot use
  // for the scratch wave offset.
  // + 2 because s102 and s103 do not exist on VI.
  // + 2 for vcc
  // + 2 for xnack_mask
  // + 2 for flat_scratch
  // + 4 for the registers reserved for the scratch resource register
  // + 1 for the register reserved for the scratch wave offset. (By excluding
  //     this register from the list to consider, when this register is being
  //     used for the scratch wave offset and there are no other free SGPRs,
  //     the value will stay in this register.)
  // + 1 if the stack pointer is used.
  // ----
  //  13 (+1)
  unsigned ReservedRegCount = 13;

  if (AllSGPRs.size() < ReservedRegCount)
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  bool HandledScratchWaveOffsetReg =
    ScratchWaveOffsetReg != TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);

  for (MCPhysReg Reg : AllSGPRs.drop_back(ReservedRegCount)) {
    // Pick the first unallocated SGPR. Be careful not to pick an alias of the
    // scratch descriptor, since we haven't added its uses yet.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      if (!HandledScratchWaveOffsetReg) {
        HandledScratchWaveOffsetReg = true;

        MRI.replaceRegWith(ScratchWaveOffsetReg, Reg);
        MFI->setScratchWaveOffsetReg(Reg);
        ScratchWaveOffsetReg = Reg;
        break;
      }
    }
  }

  return std::make_pair(ScratchWaveOffsetReg, SPReg);
}

void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
                                                MachineBasicBlock &MBB) const {
  // Emit the debugger prologue if the "amdgpu-debugger-emit-prologue"
  // attribute was specified.
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  if (ST.debuggerEmitPrologue())
    emitDebuggerPrologue(MF, MBB);

  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // If we only have SGPR spills, we won't actually be using scratch memory
  // since these spill to VGPRs.
  //
  // FIXME: We should be cleaning up these unused SGPR spill frame indices
  // somewhere.

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // We need to do the replacement of the private segment buffer and wave
  // offset register even if there are no stack objects. There could be stores
  // to undef or a constant without an associated object.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.

  if (MFI->hasFlatScratchInit())
    emitFlatScratchInit(ST, MF, MBB);

  unsigned SPReg = MFI->getStackPtrOffsetReg();
  if (SPReg != AMDGPU::SP_REG) {
    assert(MRI.isReserved(SPReg) && "SPReg used but not reserved");

    DebugLoc DL;
    const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
    int64_t StackSize = FrameInfo.getStackSize();

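    // Frame sizes are recorded per lane; scratch memory is swizzled across
    // the wavefront, so the SP starts at the wave offset plus the per-lane
    // frame size scaled by the wavefront size.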
    if (StackSize == 0) {
      BuildMI(MBB, MBB.begin(), DL, TII->get(AMDGPU::COPY), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg());
    } else {
      BuildMI(MBB, MBB.begin(), DL, TII->get(AMDGPU::S_ADD_U32), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg())
        .addImm(StackSize * ST.getWavefrontSize());
    }
  }

  unsigned ScratchRsrcReg
    = getReservedPrivateSegmentBufferReg(ST, TII, TRI, MFI, MF);

  unsigned ScratchWaveOffsetReg;
  std::tie(ScratchWaveOffsetReg, SPReg)
    = getReservedPrivateSegmentWaveByteOffsetReg(ST, TII, TRI, MFI, MF);

  // It's possible to have uses of only ScratchWaveOffsetReg without
  // ScratchRsrcReg if it's only used for the initialization of flat_scratch,
  // but the inverse is not true.
  if (ScratchWaveOffsetReg == AMDGPU::NoRegister) {
    assert(ScratchRsrcReg == AMDGPU::NoRegister);
    return;
  }

  // We need to insert initialization of the scratch resource descriptor.
  unsigned PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
    AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);

  unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister;
  if (ST.isAmdCodeObjectV2(MF)) {
    PreloadedPrivateBufferReg = MFI->getPreloadedReg(
      AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
  }

  bool OffsetRegUsed = MRI.isPhysRegUsed(ScratchWaveOffsetReg);
  bool ResourceRegUsed = ScratchRsrcReg != AMDGPU::NoRegister &&
                         MRI.isPhysRegUsed(ScratchRsrcReg);

  // We added live-ins during argument lowering, but since they were not used
  // they were deleted. We're adding the uses now, so add them back.
  if (OffsetRegUsed) {
    assert(PreloadedScratchWaveOffsetReg != AMDGPU::NoRegister &&
           "scratch wave offset input is required");
    MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
    MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
  }

  if (ResourceRegUsed && PreloadedPrivateBufferReg != AMDGPU::NoRegister) {
    assert(ST.isAmdCodeObjectV2(MF) || ST.isMesaGfxShader(MF));
    MRI.addLiveIn(PreloadedPrivateBufferReg);
    MBB.addLiveIn(PreloadedPrivateBufferReg);
  }

  // Make the selected register live throughout the function.
  for (MachineBasicBlock &OtherBB : MF) {
    if (&OtherBB == &MBB)
      continue;

    if (OffsetRegUsed)
      OtherBB.addLiveIn(ScratchWaveOffsetReg);

    if (ResourceRegUsed)
      OtherBB.addLiveIn(ScratchRsrcReg);
  }

  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // If we reserved the original input registers, we don't need to copy to the
  // reserved registers.

  bool CopyBuffer = ResourceRegUsed &&
    PreloadedPrivateBufferReg != AMDGPU::NoRegister &&
    ST.isAmdCodeObjectV2(MF) &&
    ScratchRsrcReg != PreloadedPrivateBufferReg;

  // This needs to be careful of the copying order to avoid overwriting one of
  // the input registers before it's been copied to its final destination.
  // Usually the offset should be copied first, but if the wave offset
  // destination aliases the preloaded buffer register, copying the buffer
  // first keeps its source intact.
  bool CopyBufferFirst = TRI->isSubRegisterEq(PreloadedPrivateBufferReg,
                                              ScratchWaveOffsetReg);
  if (CopyBuffer && CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (OffsetRegUsed &&
      PreloadedScratchWaveOffsetReg != ScratchWaveOffsetReg) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
      .addReg(PreloadedScratchWaveOffsetReg,
              MRI.isPhysRegUsed(ScratchWaveOffsetReg) ? 0 : RegState::Kill);
  }

  if (CopyBuffer && !CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (ResourceRegUsed)
    emitEntryFunctionScratchSetup(ST, MF, MBB, MFI, I,
        PreloadedPrivateBufferReg, ScratchRsrcReg);
}

// Emit scratch setup code for AMDPAL or Mesa, assuming ResourceRegUsed is set.
void SIFrameLowering::emitEntryFunctionScratchSetup(const SISubtarget &ST,
      MachineFunction &MF, MachineBasicBlock &MBB, SIMachineFunctionInfo *MFI,
      MachineBasicBlock::iterator I, unsigned PreloadedPrivateBufferReg,
      unsigned ScratchRsrcReg) const {

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  DebugLoc DL;

  if (ST.isAmdPalOS()) {
    // The pointer to the GIT is formed from the offset passed in and either
    // the amdgpu-git-ptr-high function attribute or the top part of the PC.
    unsigned RsrcLo = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
    unsigned RsrcHi = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
    unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    if (MFI->getGITPtrHigh() != 0xffffffff) {
      BuildMI(MBB, I, DL, SMovB32, RsrcHi)
        .addImm(MFI->getGITPtrHigh())
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    } else {
      const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64);
      BuildMI(MBB, I, DL, GetPC64, Rsrc01);
    }
    auto GitPtrLo = AMDGPU::SGPR0; // Low GIT address passed in
    if (ST.hasMergedShaders()) {
      switch (MF.getFunction().getCallingConv()) {
        case CallingConv::AMDGPU_HS:
        case CallingConv::AMDGPU_GS:
          // The low GIT address is passed in s8 rather than s0 for an LS+HS
          // or ES+GS merged shader on gfx9+.
          GitPtrLo = AMDGPU::SGPR8;
          break;
        default:
          break;
      }
    }
    BuildMI(MBB, I, DL, SMovB32, RsrcLo)
      .addReg(GitPtrLo)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0.
    PointerType *PtrTy =
      PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
                       AMDGPUAS::CONSTANT_ADDRESS);
    MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
    const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
    auto MMO = MF.getMachineMemOperand(PtrInfo,
                                       MachineMemOperand::MOLoad |
                                       MachineMemOperand::MOInvariant |
                                       MachineMemOperand::MODereferenceable,
                                       0, 0);
    BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg)
      .addReg(Rsrc01)
      .addImm(0) // offset
      .addImm(0) // glc
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine)
      .addMemOperand(MMO);
    return;
  }
  if (ST.isMesaGfxShader(MF) ||
      PreloadedPrivateBufferReg == AMDGPU::NoRegister) {
    assert(!ST.isAmdCodeObjectV2(MF));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and set up the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();

    if (MFI->hasImplicitBufferPtr()) {
      unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        PointerType *PtrTy =
          PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
                           AMDGPUAS::CONSTANT_ADDRESS);
        MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
        auto MMO = MF.getMachineMemOperand(PtrInfo,
                                           MachineMemOperand::MOLoad |
                                           MachineMemOperand::MOInvariant |
                                           MachineMemOperand::MODereferenceable,
                                           0, 0);
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addImm(0) // offset
          .addImm(0) // glc
          .addMemOperand(MMO)
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      }
    } else {
      unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
        .addExternalSymbol("SCRATCH_RSRC_DWORD0")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
        .addExternalSymbol("SCRATCH_RSRC_DWORD1")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

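    // The low half of Rsrc23 initializes descriptor dword 2 (NUM_RECORDS)
    // and the high half dword 3 (the stride/format bits); see
    // getScratchRsrcWords23 for the exact encoding.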
    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
      .addImm(Rsrc23 & 0xffffffff)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
      .addImm(Rsrc23 >> 32)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  }
}

void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction()) {
    emitEntryFunctionPrologue(MF, MBB);
    return;
  }

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();

  unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  unsigned FramePtrReg = FuncInfo->getFrameOffsetReg();

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL;

  bool NeedFP = hasFP(MF);
  if (NeedFP) {
    // If we need a frame pointer, set it up here. It takes whatever the value
    // of the stack pointer is at this point. Any variable-sized objects will
    // be allocated after this, so we can still use the frame pointer to
    // reference locals.
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
      .addReg(StackPtrReg)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  uint32_t NumBytes = MFI.getStackSize();
  if (NumBytes != 0 && hasSP(MF)) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(NumBytes * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
  }

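  // Store the callee-saved VGPRs that were chosen to hold SGPR spills; only
  // those with an assigned frame index need to be saved here.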
  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;
    TII->storeRegToStackSlot(MBB, MBBI, Reg.VGPR, true,
                             Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
                             &TII->getRegisterInfo());
  }
}

void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction())
    return;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();

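  // Reload the VGPRs used for SGPR spilling that were saved in the prologue.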
  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;
    TII->loadRegFromStackSlot(MBB, MBBI, Reg.VGPR,
                              Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
                              &TII->getRegisterInfo());
  }

  unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  if (StackPtrReg == AMDGPU::NoRegister)
    return;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  uint32_t NumBytes = MFI.getStackSize();

  DebugLoc DL;

  // FIXME: Clarify the distinction between not having an SP set and having
  // one. For callee functions, it's really a question of whether we need the
  // SP to be accurate or not.

  if (NumBytes != 0 && hasSP(MF)) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_SUB_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(NumBytes * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameDestroy);
  }
}

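// Return true if every stack object has been marked dead, i.e. no real stack
// memory is needed.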
static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I))
      return false;
  }

  return true;
}

int SIFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            unsigned &FrameReg) const {
  const SIRegisterInfo *RI = MF.getSubtarget<SISubtarget>().getRegisterInfo();

  FrameReg = RI->getFrameRegister(MF);
  return MF.getFrameInfo().getObjectOffset(FI);
}

void SIFrameLowering::processFunctionBeforeFrameFinalized(
  MachineFunction &MF,
  RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasStackObjects())
    return;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  bool AllSGPRSpilledToVGPRs = false;

  if (TRI.spillSGPRToVGPR() && FuncInfo->hasSpilledSGPRs()) {
    AllSGPRSpilledToVGPRs = true;

    // Process all SGPR spills before frame offsets are finalized. Ideally
    // SGPRs are spilled to VGPRs, in which case we can eliminate the stack
    // usage.
    //
    // XXX - This operates under the assumption that only other SGPR spills
    // are users of the frame index. I'm not 100% sure this is correct. The
    // StackColoring pass has a comment saying a future improvement would be
    // merging allocas with spill slots, but for now, according to
    // MachineFrameInfo, an isSpillSlot object can't alias any other object.
    for (MachineBasicBlock &MBB : MF) {
      MachineBasicBlock::iterator Next;
      for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
        MachineInstr &MI = *I;
        Next = std::next(I);

        if (TII->isSGPRSpill(MI)) {
          int FI = TII->getNamedOperand(MI, AMDGPU::OpName::addr)->getIndex();
          if (FuncInfo->allocateSGPRSpillToVGPR(MF, FI)) {
            bool Spilled = TRI.eliminateSGPRToVGPRSpillFrameIndex(MI, FI, RS);
            (void)Spilled;
            assert(Spilled && "failed to spill SGPR to VGPR when allocated");
          } else
            AllSGPRSpilledToVGPRs = false;
        }
      }
    }

    FuncInfo->removeSGPRToVGPRFrameIndices(MFI);
  }

  // FIXME: The other checks should be redundant with allStackObjectsAreDead,
  // but currently hasNonSpillStackObjects is set only from source
  // allocas. Stack temps produced from legalization are not counted currently.
  if (FuncInfo->hasNonSpillStackObjects() || FuncInfo->hasSpilledVGPRs() ||
      !AllSGPRSpilledToVGPRs || !allStackObjectsAreDead(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    // We force this to be at offset 0 so no user object ever has 0 as an
    // address, so we may use 0 as an invalid pointer value. This is because
    // LLVM assumes 0 is an invalid pointer in address space 0. Because alloca
    // is required to be address space 0, we are forced to accept this for
    // now. Ideally we could have the stack in another address space with 0 as
    // a valid pointer, and -1 as the null value.
    //
    // This will also waste additional space when user stack objects require
    // more than 4 byte alignment.
    //
    // The main cost here is losing the offset for addressing modes. However
    // this also ensures we shouldn't need a register for the offset when
    // emergency scavenging.
    int ScavengeFI = MFI.CreateFixedObject(
      TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
    RS->addScavengingFrameIndex(ScavengeFI);
  }
}

void SIFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                           BitVector &SavedRegs,
                                           RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // The SP is specifically managed and we don't want extra spills of it.
  SavedRegs.reset(MFI->getStackPtrOffsetReg());
}

MachineBasicBlock::iterator SIFrameLowering::eliminateCallFramePseudoInstr(
  MachineFunction &MF,
  MachineBasicBlock &MBB,
  MachineBasicBlock::iterator I) const {
  int64_t Amount = I->getOperand(0).getImm();
  if (Amount == 0)
    return MBB.erase(I);

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  if (!TFI->hasReservedCallFrame(MF)) {
    unsigned Align = getStackAlignment();

    Amount = alignTo(Amount, Align);
    assert(isUInt<32>(Amount) && "exceeded stack address space size");
    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    unsigned SPReg = MFI->getStackPtrOffsetReg();

    unsigned Op = IsDestroy ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
    BuildMI(MBB, I, DL, TII->get(Op), SPReg)
      .addReg(SPReg)
      .addImm(Amount * ST.getWavefrontSize());
  } else if (CalleePopAmount != 0) {
    llvm_unreachable("is this used?");
  }

  return MBB.erase(I);
}

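// Spill the work group IDs (SGPRs) and work item IDs (VGPRs) for each
// dimension to their debugger stack objects so they can be found at known
// scratch offsets.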
void SIFrameLowering::emitDebuggerPrologue(MachineFunction &MF,
                                           MachineBasicBlock &MBB) const {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  MachineBasicBlock::iterator I = MBB.begin();
  DebugLoc DL;

  // For each dimension:
  for (unsigned i = 0; i < 3; ++i) {
    // Get the work group ID SGPR, and make it live-in again.
    unsigned WorkGroupIDSGPR = MFI->getWorkGroupIDSGPR(i);
    MF.getRegInfo().addLiveIn(WorkGroupIDSGPR);
    MBB.addLiveIn(WorkGroupIDSGPR);

    // Since SGPRs are spilled into VGPRs, copy the work group ID SGPR to a
    // VGPR in order to spill it to scratch.
    unsigned WorkGroupIDVGPR =
      MF.getRegInfo().createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), WorkGroupIDVGPR)
      .addReg(WorkGroupIDSGPR);

    // Spill the work group ID.
    int WorkGroupIDObjectIdx = MFI->getDebuggerWorkGroupIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkGroupIDVGPR, false,
      WorkGroupIDObjectIdx, &AMDGPU::VGPR_32RegClass, TRI);

    // Get the work item ID VGPR, and make it live-in again.
    unsigned WorkItemIDVGPR = MFI->getWorkItemIDVGPR(i);
    MF.getRegInfo().addLiveIn(WorkItemIDVGPR);
    MBB.addLiveIn(WorkItemIDVGPR);

    // Spill the work item ID.
    int WorkItemIDObjectIdx = MFI->getDebuggerWorkItemIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkItemIDVGPR, false,
      WorkItemIDObjectIdx, &AMDGPU::VGPR_32RegClass, TRI);
  }
}

bool SIFrameLowering::hasFP(const MachineFunction &MF) const {
  // All stack operations are relative to the frame offset SGPR.
  // TODO: We still want to eliminate the frame pointer in some cases.
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // XXX - Is this only called after the frame is finalized? We should be able
  // to check the frame size.
  return MFI.hasStackObjects() && !allStackObjectsAreDead(MFI);
}

bool SIFrameLowering::hasSP(const MachineFunction &MF) const {
  // All stack operations are relative to the frame offset SGPR.
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return MFI.hasCalls() || MFI.hasVarSizedObjects();
}