//===----------------------- SIFrameLowering.cpp --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"

#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"

using namespace llvm;

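// Each SGPR128 covers four consecutive SGPR_32 registers, so dividing the
// maximum SGPR count by four gives the number of 4-dword tuples available
// (e.g. 96 usable SGPRs yield 24 SGPR128 tuples).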
static ArrayRef<MCPhysReg> getAllSGPR128(const SISubtarget &ST,
                                         const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
                      ST.getMaxNumSGPRs(MF) / 4);
}

static ArrayRef<MCPhysReg> getAllSGPRs(const SISubtarget &ST,
                                       const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(),
                      ST.getMaxNumSGPRs(MF));
}

void SIFrameLowering::emitFlatScratchInit(const SISubtarget &ST,
                                          MachineFunction &MF,
                                          MachineBasicBlock &MBB) const {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // We don't need this if we only have spills, since there is no user-facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  unsigned FlatScratchInitReg
    = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);

  MachineRegisterInfo &MRI = MF.getRegInfo();
  MRI.addLiveIn(FlatScratchInitReg);
  MBB.addLiveIn(FlatScratchInitReg);

  unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
  unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

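  // Roughly, the two sequences emitted below look like this (register names
  // illustrative):
  //
  //   flat_scratch is a 64-bit pointer:
  //     s_add_u32  flat_scratch_lo, <init_lo>, <wave_offset>
  //     s_addc_u32 flat_scratch_hi, <init_hi>, 0
  //
  //   flat_scratch_lo holds the size, flat_scratch_hi the offset in
  //   256-byte units:
  //     s_mov_b32  flat_scratch_lo, <init_hi>
  //     s_add_u32  <init_lo>, <init_lo>, <wave_offset>
  //     s_lshr_b32 flat_scratch_hi, <init_lo>, 8
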
  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
      .addReg(FlatScrInitHi)
      .addImm(0);

    return;
  }

  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
    .addReg(FlatScrInitHi, RegState::Kill);

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
    .addReg(FlatScrInitLo)
    .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
    .addReg(FlatScrInitLo, RegState::Kill)
    .addImm(8);
}

unsigned SIFrameLowering::getReservedPrivateSegmentBufferReg(
  const SISubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // We need to insert initialization of the scratch resource descriptor.
  unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
  if (ScratchRsrcReg == AMDGPU::NoRegister ||
      !MRI.isPhysRegUsed(ScratchRsrcReg))
    return AMDGPU::NoRegister;

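  // On subtargets with the SGPR init bug the SGPR count is effectively fixed,
  // so nothing is gained by shifting the descriptor down; the same holds if a
  // register other than the default reserved one was already chosen.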
  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift the descriptor down to the
  // end of those that were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. We cannot
  // do this only for the resources required for scratch access; for now we
  // skip over user SGPRs and may leave unused holes.

  // We find the resource first because it has an alignment requirement.

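  // Round the preloaded SGPR count up to a whole number of 4-register tuples;
  // e.g. 10 preloaded SGPRs occupy the first 3 SGPR128 tuples.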
  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = getAllSGPR128(ST, MF);
  AllSGPR128s = AllSGPR128s.slice(
      std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

// Shift down registers reserved for the scratch wave offset and stack pointer
// SGPRs.
std::pair<unsigned, unsigned>
SIFrameLowering::getReservedPrivateSegmentWaveByteOffsetReg(
  const SISubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // No replacement necessary.
  if (ScratchWaveOffsetReg == AMDGPU::NoRegister ||
      !MRI.isPhysRegUsed(ScratchWaveOffsetReg)) {
    assert(MFI->getStackPtrOffsetReg() == AMDGPU::SP_REG);
    return std::make_pair(AMDGPU::NoRegister, AMDGPU::NoRegister);
  }

  unsigned SPReg = MFI->getStackPtrOffsetReg();
  if (ST.hasSGPRInitBug())
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();

  ArrayRef<MCPhysReg> AllSGPRs = getAllSGPRs(ST, MF);
  if (NumPreloaded > AllSGPRs.size())
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  AllSGPRs = AllSGPRs.slice(NumPreloaded);

  // We need to drop the registers from the end of the list that we cannot use
  // for the scratch wave offset.
  // + 2 because s102 and s103 do not exist on VI
  // + 2 for vcc
  // + 2 for xnack_mask
  // + 2 for flat_scratch
  // + 4 for registers reserved for scratch resource register
  // + 1 for register reserved for scratch wave offset.  (By excluding this
  //     register from the list to consider, it means that when this
  //     register is being used for the scratch wave offset and there
  //     are no other free SGPRs, then the value will stay in this register.)
  // + 1 if stack pointer is used.
  // ----
  //  13 (+1)
  unsigned ReservedRegCount = 13;

  if (AllSGPRs.size() < ReservedRegCount)
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  bool HandledScratchWaveOffsetReg =
    ScratchWaveOffsetReg != TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);

  for (MCPhysReg Reg : AllSGPRs.drop_back(ReservedRegCount)) {
    // Pick the first unallocated SGPR. Be careful not to pick an alias of the
    // scratch descriptor, since we haven't added its uses yet.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      if (!HandledScratchWaveOffsetReg) {
        HandledScratchWaveOffsetReg = true;

        MRI.replaceRegWith(ScratchWaveOffsetReg, Reg);
        MFI->setScratchWaveOffsetReg(Reg);
        ScratchWaveOffsetReg = Reg;
        break;
      }
    }
  }

  return std::make_pair(ScratchWaveOffsetReg, SPReg);
}

void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
                                                MachineBasicBlock &MBB) const {
  // Emit debugger prologue if "amdgpu-debugger-emit-prologue" attribute was
  // specified.
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  if (ST.debuggerEmitPrologue())
    emitDebuggerPrologue(MF, MBB);

  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // If we only have SGPR spills, we won't actually be using scratch memory
  // since these spill to VGPRs.
  //
  // FIXME: We should be cleaning up these unused SGPR spill frame indices
  // somewhere.

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // We need to do the replacement of the private segment buffer and wave
  // offset register even if there are no stack objects. There could be stores
  // to undef or a constant without an associated object.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.

  if (MFI->hasFlatScratchInit())
    emitFlatScratchInit(ST, MF, MBB);

  unsigned SPReg = MFI->getStackPtrOffsetReg();
  if (SPReg != AMDGPU::SP_REG) {
    assert(MRI.isReserved(SPReg) && "SPReg used but not reserved");

    DebugLoc DL;
    const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
    int64_t StackSize = FrameInfo.getStackSize();

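    // Frame sizes are per lane, while the wave offset and SP are byte offsets
    // for the whole wave, so scale the per-lane size by the wavefront size
    // (e.g. a 16-byte frame on a 64-lane wave advances SP by 1024 bytes).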
    if (StackSize == 0) {
      BuildMI(MBB, MBB.begin(), DL, TII->get(AMDGPU::COPY), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg());
    } else {
      BuildMI(MBB, MBB.begin(), DL, TII->get(AMDGPU::S_ADD_U32), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg())
        .addImm(StackSize * ST.getWavefrontSize());
    }
  }

  unsigned ScratchRsrcReg
    = getReservedPrivateSegmentBufferReg(ST, TII, TRI, MFI, MF);

  unsigned ScratchWaveOffsetReg;
  std::tie(ScratchWaveOffsetReg, SPReg)
    = getReservedPrivateSegmentWaveByteOffsetReg(ST, TII, TRI, MFI, MF);

  // It's possible to have uses of only ScratchWaveOffsetReg without
  // ScratchRsrcReg if it's only used for the initialization of flat_scratch,
  // but the inverse is not true.
  if (ScratchWaveOffsetReg == AMDGPU::NoRegister) {
    assert(ScratchRsrcReg == AMDGPU::NoRegister);
    return;
  }

  // We need to insert initialization of the scratch resource descriptor.
  unsigned PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
    AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);

  unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister;
  if (ST.isAmdCodeObjectV2(MF)) {
    PreloadedPrivateBufferReg = MFI->getPreloadedReg(
      AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
  }

  bool OffsetRegUsed = MRI.isPhysRegUsed(ScratchWaveOffsetReg);
  bool ResourceRegUsed = ScratchRsrcReg != AMDGPU::NoRegister &&
                         MRI.isPhysRegUsed(ScratchRsrcReg);

  // We added live-ins during argument lowering, but since they were not used
  // they were deleted. We're adding the uses now, so add them back.
  if (OffsetRegUsed) {
    assert(PreloadedScratchWaveOffsetReg != AMDGPU::NoRegister &&
           "scratch wave offset input is required");
    MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
    MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
  }

  if (ResourceRegUsed && PreloadedPrivateBufferReg != AMDGPU::NoRegister) {
    assert(ST.isAmdCodeObjectV2(MF) || ST.isMesaGfxShader(MF));
    MRI.addLiveIn(PreloadedPrivateBufferReg);
    MBB.addLiveIn(PreloadedPrivateBufferReg);
  }

  // Make the selected registers live throughout the function.
  for (MachineBasicBlock &OtherBB : MF) {
    if (&OtherBB == &MBB)
      continue;

    if (OffsetRegUsed)
      OtherBB.addLiveIn(ScratchWaveOffsetReg);

    if (ResourceRegUsed)
      OtherBB.addLiveIn(ScratchRsrcReg);
  }

  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // If we reserved the original input registers, we don't need to copy to the
  // reserved registers.

  bool CopyBuffer = ResourceRegUsed &&
    PreloadedPrivateBufferReg != AMDGPU::NoRegister &&
    ST.isAmdCodeObjectV2(MF) &&
    ScratchRsrcReg != PreloadedPrivateBufferReg;

  // This needs to be careful of the copy ordering to avoid overwriting one of
  // the input registers before it has been copied to its final destination.
  // Usually the offset should be copied first.
  bool CopyBufferFirst = TRI->isSubRegisterEq(PreloadedPrivateBufferReg,
                                              ScratchWaveOffsetReg);
  if (CopyBuffer && CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (OffsetRegUsed &&
      PreloadedScratchWaveOffsetReg != ScratchWaveOffsetReg) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
      .addReg(PreloadedScratchWaveOffsetReg,
              MRI.isPhysRegUsed(ScratchWaveOffsetReg) ? 0 : RegState::Kill);
  }

  if (CopyBuffer && !CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (ResourceRegUsed)
    emitEntryFunctionScratchSetup(ST, MF, MBB, MFI, I,
        PreloadedPrivateBufferReg, ScratchRsrcReg);
}

// Emit scratch setup code for AMDPAL or Mesa, assuming ResourceRegUsed is set.
void SIFrameLowering::emitEntryFunctionScratchSetup(const SISubtarget &ST,
      MachineFunction &MF, MachineBasicBlock &MBB, SIMachineFunctionInfo *MFI,
      MachineBasicBlock::iterator I, unsigned PreloadedPrivateBufferReg,
      unsigned ScratchRsrcReg) const {

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  DebugLoc DL;
  auto AMDGPUASI = ST.getAMDGPUAS();

  if (ST.isAmdPalOS()) {
    // The pointer to the GIT is formed from the offset passed in and either
    // the amdgpu-git-ptr-high function attribute or the top part of the PC.
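    //
    // A rough sketch of the code emitted here (registers illustrative):
    //   s_mov_b32      rsrc_hi, <git-ptr-high>  ; or: s_getpc_b64 rsrc[0:1]
    //   s_mov_b32      rsrc_lo, s0              ; low half passed in SGPR0
    //   s_load_dwordx4 rsrc, rsrc[0:1], 0x0     ; fetch the descriptor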
    unsigned RsrcLo = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
    unsigned RsrcHi = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
    unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    if (MFI->getGITPtrHigh() != 0xffffffff) {
      BuildMI(MBB, I, DL, SMovB32, RsrcHi)
        .addImm(MFI->getGITPtrHigh())
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    } else {
      const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64);
      BuildMI(MBB, I, DL, GetPC64, Rsrc01);
    }
    BuildMI(MBB, I, DL, SMovB32, RsrcLo)
      .addReg(AMDGPU::SGPR0) // Low address passed in
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0.
    PointerType *PtrTy =
      PointerType::get(Type::getInt64Ty(MF.getFunction()->getContext()),
                       AMDGPUASI.CONSTANT_ADDRESS);
    MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
    const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
    auto MMO = MF.getMachineMemOperand(PtrInfo,
                                       MachineMemOperand::MOLoad |
                                       MachineMemOperand::MOInvariant |
                                       MachineMemOperand::MODereferenceable,
                                       0, 0);
    BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg)
      .addReg(Rsrc01)
      .addImm(0) // offset
      .addImm(0) // glc
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine)
      .addMemOperand(MMO);
    return;
  }
  if (ST.isMesaGfxShader(MF)
      || (PreloadedPrivateBufferReg == AMDGPU::NoRegister)) {
    assert(!ST.isAmdCodeObjectV2(MF));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and set up the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();
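    // Rsrc23 packs DWORDs 2 and 3 of the descriptor (presumably NUM_RECORDS
    // plus the stride/format flags); it is split into two 32-bit s_mov_b32
    // writes below.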

    if (MFI->hasImplicitBufferPtr()) {
      unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        PointerType *PtrTy =
          PointerType::get(Type::getInt64Ty(MF.getFunction()->getContext()),
                           AMDGPUASI.CONSTANT_ADDRESS);
        MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
        auto MMO = MF.getMachineMemOperand(PtrInfo,
                                           MachineMemOperand::MOLoad |
                                           MachineMemOperand::MOInvariant |
                                           MachineMemOperand::MODereferenceable,
                                           0, 0);
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addImm(0) // offset
          .addImm(0) // glc
          .addMemOperand(MMO)
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      }
    } else {
      unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
        .addExternalSymbol("SCRATCH_RSRC_DWORD0")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
        .addExternalSymbol("SCRATCH_RSRC_DWORD1")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
      .addImm(Rsrc23 & 0xffffffff)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
      .addImm(Rsrc23 >> 32)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  }
}

void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction()) {
    emitEntryFunctionPrologue(MF, MBB);
    return;
  }

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();

  unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  unsigned FramePtrReg = FuncInfo->getFrameOffsetReg();

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL;

  bool NeedFP = hasFP(MF);
  if (NeedFP) {
    // If we need a frame pointer, set it up here. It's whatever the value of
    // the stack pointer is at this point. Any variable size objects will be
    // allocated after this, so we can still use the frame pointer to reference
    // locals.
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
      .addReg(StackPtrReg)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  uint32_t NumBytes = MFI.getStackSize();
  if (NumBytes != 0 && hasSP(MF)) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(NumBytes * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
  }

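  // Save the VGPRs that were set aside to hold SGPR spills; they act as
  // callee-saved registers and are reloaded in the epilogue.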
  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;
    TII->storeRegToStackSlot(MBB, MBBI, Reg.VGPR, true,
                             Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
                             &TII->getRegisterInfo());
  }
}

void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction())
    return;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();

  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;
    TII->loadRegFromStackSlot(MBB, MBBI, Reg.VGPR,
                              Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
                              &TII->getRegisterInfo());
  }

  unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  if (StackPtrReg == AMDGPU::NoRegister)
    return;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  uint32_t NumBytes = MFI.getStackSize();

  DebugLoc DL;

  // FIXME: Clarify the distinction between having no SP set and having one.
  // For callee functions, it's really whether we need the SP to be accurate
  // or not.

  if (NumBytes != 0 && hasSP(MF)) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_SUB_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(NumBytes * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameDestroy);
  }
}

static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I))
      return false;
  }

  return true;
}

int SIFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            unsigned &FrameReg) const {
  const SIRegisterInfo *RI = MF.getSubtarget<SISubtarget>().getRegisterInfo();

  FrameReg = RI->getFrameRegister(MF);
  return MF.getFrameInfo().getObjectOffset(FI);
}

void SIFrameLowering::processFunctionBeforeFrameFinalized(
  MachineFunction &MF,
  RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasStackObjects())
    return;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  bool AllSGPRSpilledToVGPRs = false;

  if (TRI.spillSGPRToVGPR() && FuncInfo->hasSpilledSGPRs()) {
    AllSGPRSpilledToVGPRs = true;

    // Process all SGPR spills before frame offsets are finalized. Ideally
    // SGPRs are spilled to VGPRs, in which case we can eliminate the stack
    // usage.
    //
    // XXX - This operates under the assumption that only other SGPR spills
    // are users of the frame index. I'm not 100% sure this is correct. The
    // StackColoring pass has a comment saying a future improvement would be
    // merging allocas with spill slots, but for now, according to
    // MachineFrameInfo, isSpillSlot can't alias any other object.
    for (MachineBasicBlock &MBB : MF) {
      MachineBasicBlock::iterator Next;
      for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
        MachineInstr &MI = *I;
        Next = std::next(I);

        if (TII->isSGPRSpill(MI)) {
          int FI = TII->getNamedOperand(MI, AMDGPU::OpName::addr)->getIndex();
          if (FuncInfo->allocateSGPRSpillToVGPR(MF, FI)) {
            bool Spilled = TRI.eliminateSGPRToVGPRSpillFrameIndex(MI, FI, RS);
            (void)Spilled;
            assert(Spilled && "failed to spill SGPR to VGPR when allocated");
          } else
            AllSGPRSpilledToVGPRs = false;
        }
      }
    }

    FuncInfo->removeSGPRToVGPRFrameIndices(MFI);
  }

  // FIXME: The other checks should be redundant with allStackObjectsAreDead,
  // but currently hasNonSpillStackObjects is set only from source
  // allocas. Stack temps produced from legalization are not counted currently.
  if (FuncInfo->hasNonSpillStackObjects() || FuncInfo->hasSpilledVGPRs() ||
      !AllSGPRSpilledToVGPRs || !allStackObjectsAreDead(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    // We force this to be at offset 0 so no user object ever has 0 as an
    // address, so we may use 0 as an invalid pointer value. This is because
    // LLVM assumes 0 is an invalid pointer in address space 0. Because alloca
    // is required to be address space 0, we are forced to accept this for
    // now. Ideally we could have the stack in another address space with 0 as
    // a valid pointer, and -1 as the null value.
    //
    // This will also waste additional space when user stack objects require
    // > 4 byte alignment.
    //
    // The main cost here is losing the offset for addressing modes. However
    // this also ensures we shouldn't need a register for the offset when
    // emergency scavenging.
    int ScavengeFI = MFI.CreateFixedObject(
      TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
    RS->addScavengingFrameIndex(ScavengeFI);
  }
}

void SIFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                           BitVector &SavedRegs,
                                           RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // The SP is specifically managed and we don't want extra spills of it.
  SavedRegs.reset(MFI->getStackPtrOffsetReg());
}

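// Lower the call frame setup/destroy pseudo instructions. When the call frame
// is not reserved, this becomes an SP adjustment analogous to the one in the
// prologue; otherwise the pseudo is simply erased.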
MachineBasicBlock::iterator SIFrameLowering::eliminateCallFramePseudoInstr(
  MachineFunction &MF,
  MachineBasicBlock &MBB,
  MachineBasicBlock::iterator I) const {
  int64_t Amount = I->getOperand(0).getImm();
  if (Amount == 0)
    return MBB.erase(I);

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  if (!TFI->hasReservedCallFrame(MF)) {
    unsigned Align = getStackAlignment();

    Amount = alignTo(Amount, Align);
    assert(isUInt<32>(Amount) && "exceeded stack address space size");
    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    unsigned SPReg = MFI->getStackPtrOffsetReg();

    unsigned Op = IsDestroy ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
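    // As in the prologue, scale the per-lane amount by the wavefront size to
    // get the per-wave byte adjustment for the swizzled scratch stack.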
    BuildMI(MBB, I, DL, TII->get(Op), SPReg)
      .addReg(SPReg)
      .addImm(Amount * ST.getWavefrontSize());
  } else if (CalleePopAmount != 0) {
    llvm_unreachable("is this used?");
  }

  return MBB.erase(I);
}

void SIFrameLowering::emitDebuggerPrologue(MachineFunction &MF,
                                           MachineBasicBlock &MBB) const {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  MachineBasicBlock::iterator I = MBB.begin();
  DebugLoc DL;

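  // Spill each dimension's work group ID SGPR and work item ID VGPR to fixed
  // stack objects so the debugger can find them at known offsets.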
  // For each dimension:
  for (unsigned i = 0; i < 3; ++i) {
    // Get work group ID SGPR, and make it live-in again.
    unsigned WorkGroupIDSGPR = MFI->getWorkGroupIDSGPR(i);
    MF.getRegInfo().addLiveIn(WorkGroupIDSGPR);
    MBB.addLiveIn(WorkGroupIDSGPR);

    // Since SGPRs are spilled into VGPRs, copy work group ID SGPR to VGPR in
    // order to spill it to scratch.
    unsigned WorkGroupIDVGPR =
      MF.getRegInfo().createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), WorkGroupIDVGPR)
      .addReg(WorkGroupIDSGPR);

    // Spill work group ID.
    int WorkGroupIDObjectIdx = MFI->getDebuggerWorkGroupIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkGroupIDVGPR, false,
      WorkGroupIDObjectIdx, &AMDGPU::VGPR_32RegClass, TRI);

    // Get work item ID VGPR, and make it live-in again.
    unsigned WorkItemIDVGPR = MFI->getWorkItemIDVGPR(i);
    MF.getRegInfo().addLiveIn(WorkItemIDVGPR);
    MBB.addLiveIn(WorkItemIDVGPR);

    // Spill work item ID.
    int WorkItemIDObjectIdx = MFI->getDebuggerWorkItemIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkItemIDVGPR, false,
      WorkItemIDObjectIdx, &AMDGPU::VGPR_32RegClass, TRI);
  }
}

bool SIFrameLowering::hasFP(const MachineFunction &MF) const {
  // All stack operations are relative to the frame offset SGPR.
  // TODO: Still want to eliminate sometimes.
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // XXX - Is this only called after frame is finalized? Should be able to
  // check frame size.
  return MFI.hasStackObjects() && !allStackObjectsAreDead(MFI);
}

bool SIFrameLowering::hasSP(const MachineFunction &MF) const {
  // The SP is only needed when there are calls or variable-sized objects.
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return MFI.hasCalls() || MFI.hasVarSizedObjects();
}