//===----------------------- SIFrameLowering.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"

using namespace llvm;


static ArrayRef<MCPhysReg> getAllSGPR128(const GCNSubtarget &ST,
                                         const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
                      ST.getMaxNumSGPRs(MF) / 4);
}

static ArrayRef<MCPhysReg> getAllSGPRs(const GCNSubtarget &ST,
                                       const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(),
                      ST.getMaxNumSGPRs(MF));
}
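
// Illustrative example (the numbers are assumptions, not taken from this
// file): with ST.getMaxNumSGPRs(MF) == 104, getAllSGPRs covers s0..s103,
// and getAllSGPR128 covers the 26 aligned quads s[0:3], s[4:7], ...,
// s[100:103].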

void SIFrameLowering::emitFlatScratchInit(const GCNSubtarget &ST,
                                          MachineFunction &MF,
                                          MachineBasicBlock &MBB) const {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // We don't need this if we only have spills since there is no user facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  unsigned FlatScratchInitReg
    = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);

  MachineRegisterInfo &MRI = MF.getRegInfo();
  MRI.addLiveIn(FlatScratchInitReg);
  MBB.addLiveIn(FlatScratchInitReg);

  unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
  unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
        .addReg(FlatScrInitLo)
        .addReg(ScratchWaveOffsetReg);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), FlatScrInitHi)
        .addReg(FlatScrInitHi)
        .addImm(0);
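      // A sketch of the intent of the two s_setreg_b32 instructions emitted
      // below, assuming the usual hwreg() assembly syntax (offset 0, width
      // 31 + 1 = 32 bits):
      //   s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO, 0, 32), <init_lo + offset>
      //   s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI, 0, 32), <init_hi + carry>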
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
        .addReg(FlatScrInitLo)
        .addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_LO |
                        (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
        .addReg(FlatScrInitHi)
        .addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_HI |
                        (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
      return;
    }

    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
      .addReg(FlatScrInitHi)
      .addImm(0);

    return;
  }

  assert(ST.getGeneration() < AMDGPUSubtarget::GFX10);

  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
    .addReg(FlatScrInitHi, RegState::Kill);

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
    .addReg(FlatScrInitLo)
    .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
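  // Worked example (illustrative values only): if FlatScrInitLo now holds
  // 0x1100 after the add above, the shift below leaves 0x11 in FLAT_SCR_HI,
  // since the per-wave scratch base is expressed in 256-byte units here.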
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
    .addReg(FlatScrInitLo, RegState::Kill)
    .addImm(8);
}

unsigned SIFrameLowering::getReservedPrivateSegmentBufferReg(
  const GCNSubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // We need to insert initialization of the scratch resource descriptor.
  unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
  if (ScratchRsrcReg == AMDGPU::NoRegister ||
      !MRI.isPhysRegUsed(ScratchRsrcReg))
    return AMDGPU::NoRegister;

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift it down to the end of those
  // which were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. We only
  // cannot do this for the resources required for scratch access. For now we
  // skip over user SGPRs and may leave unused holes.

  // We find the resource first because it has an alignment requirement.

  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = getAllSGPR128(ST, MF);
  AllSGPR128s = AllSGPR128s.slice(
      std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));
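  // For example (illustrative, assuming 4-aligned SGPR quads): with 10
  // preloaded SGPRs, NumPreloaded rounds up to 3, so s[0:3], s[4:7] and
  // s[8:11] are skipped and the search below starts at s[12:15].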

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

// Shift down registers reserved for the scratch wave offset.
unsigned SIFrameLowering::getReservedPrivateSegmentWaveByteOffsetReg(
    const GCNSubtarget &ST, const SIInstrInfo *TII, const SIRegisterInfo *TRI,
    SIMachineFunctionInfo *MFI, MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  assert(MFI->isEntryFunction());

  // No replacement necessary.
  if (ScratchWaveOffsetReg == AMDGPU::NoRegister ||
      (!hasFP(MF) && !MRI.isPhysRegUsed(ScratchWaveOffsetReg))) {
    return AMDGPU::NoRegister;
  }

  if (ST.hasSGPRInitBug())
    return ScratchWaveOffsetReg;

  unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();

  ArrayRef<MCPhysReg> AllSGPRs = getAllSGPRs(ST, MF);
  if (NumPreloaded > AllSGPRs.size())
    return ScratchWaveOffsetReg;

  AllSGPRs = AllSGPRs.slice(NumPreloaded);

  // We need to drop registers from the end of the list that we cannot use
  // for the scratch wave offset.
  // + 2 because s102 and s103 do not exist on VI.
  // + 2 for vcc
  // + 2 for xnack_mask
  // + 2 for flat_scratch
  // + 4 for registers reserved for the scratch resource register
  // + 1 for the register reserved for the scratch wave offset.  (By excluding
  //     this register from the list to consider, when it is being used for the
  //     scratch wave offset and there are no other free SGPRs, the value will
  //     simply stay in this register.)
  // + 1 if the stack pointer is used.
  // ----
  //  13 (+1)
  unsigned ReservedRegCount = 13;

  if (AllSGPRs.size() < ReservedRegCount)
    return ScratchWaveOffsetReg;

  bool HandledScratchWaveOffsetReg =
    ScratchWaveOffsetReg != TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);

  for (MCPhysReg Reg : AllSGPRs.drop_back(ReservedRegCount)) {
    // Pick the first unallocated SGPR. Be careful not to pick an alias of the
    // scratch descriptor, since we haven't added its uses yet.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      if (!HandledScratchWaveOffsetReg) {
        HandledScratchWaveOffsetReg = true;

        MRI.replaceRegWith(ScratchWaveOffsetReg, Reg);
        if (MFI->getScratchWaveOffsetReg() == MFI->getStackPtrOffsetReg()) {
          assert(!hasFP(MF));
          MFI->setStackPtrOffsetReg(Reg);
        }

        MFI->setScratchWaveOffsetReg(Reg);
        MFI->setFrameOffsetReg(Reg);
        ScratchWaveOffsetReg = Reg;
        break;
      }
    }
  }

  return ScratchWaveOffsetReg;
}

void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
                                                MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // If we only have SGPR spills, we won't actually be using scratch memory
  // since these spill to VGPRs.
  //
  // FIXME: We should be cleaning up these unused SGPR spill frame indices
  // somewhere.

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();

  // We need to do the replacement of the private segment buffer and wave
  // offset register even if there are no stack objects. There could be stores
  // to undef or a constant without an associated object.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.

  if (MFI->hasFlatScratchInit())
    emitFlatScratchInit(ST, MF, MBB);

  unsigned ScratchRsrcReg
    = getReservedPrivateSegmentBufferReg(ST, TII, TRI, MFI, MF);

  unsigned ScratchWaveOffsetReg =
      getReservedPrivateSegmentWaveByteOffsetReg(ST, TII, TRI, MFI, MF);

  // We need to insert initialization of the scratch resource descriptor.
  unsigned PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
    AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);

  unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister;
  if (ST.isAmdHsaOrMesa(F)) {
    PreloadedPrivateBufferReg = MFI->getPreloadedReg(
      AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
  }

  bool OffsetRegUsed = ScratchWaveOffsetReg != AMDGPU::NoRegister &&
                       MRI.isPhysRegUsed(ScratchWaveOffsetReg);
  bool ResourceRegUsed = ScratchRsrcReg != AMDGPU::NoRegister &&
                         MRI.isPhysRegUsed(ScratchRsrcReg);

  // FIXME: Hack to not crash in situations which emitted an error.
  if (PreloadedScratchWaveOffsetReg == AMDGPU::NoRegister)
    return;

  // We added live-ins during argument lowering, but since they were not used
  // they were deleted. We're adding the uses now, so add them back.
  MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
  MBB.addLiveIn(PreloadedScratchWaveOffsetReg);

  if (ResourceRegUsed && PreloadedPrivateBufferReg != AMDGPU::NoRegister) {
    assert(ST.isAmdHsaOrMesa(F) || ST.isMesaGfxShader(F));
    MRI.addLiveIn(PreloadedPrivateBufferReg);
    MBB.addLiveIn(PreloadedPrivateBufferReg);
  }

  // Make the register selected live throughout the function.
  for (MachineBasicBlock &OtherBB : MF) {
    if (&OtherBB == &MBB)
      continue;

    if (OffsetRegUsed)
      OtherBB.addLiveIn(ScratchWaveOffsetReg);

    if (ResourceRegUsed)
      OtherBB.addLiveIn(ScratchRsrcReg);
  }

  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // If we reserved the original input registers, we don't need to copy to the
  // reserved registers.

  bool CopyBuffer = ResourceRegUsed &&
    PreloadedPrivateBufferReg != AMDGPU::NoRegister &&
    ST.isAmdHsaOrMesa(F) &&
    ScratchRsrcReg != PreloadedPrivateBufferReg;

  // This needs to be careful about the copy order to avoid overwriting one of
  // the input registers before it has been copied to its final destination.
  // Usually the offset should be copied first.
  bool CopyBufferFirst = TRI->isSubRegisterEq(PreloadedPrivateBufferReg,
                                              ScratchWaveOffsetReg);
  if (CopyBuffer && CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  unsigned SPReg = MFI->getStackPtrOffsetReg();
  assert(SPReg != AMDGPU::SP_REG);

  // FIXME: Remove the isPhysRegUsed checks.
  const bool HasFP = hasFP(MF);

  if (HasFP || OffsetRegUsed) {
    assert(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
      .addReg(PreloadedScratchWaveOffsetReg, HasFP ? RegState::Kill : 0);
  }

  if (CopyBuffer && !CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (ResourceRegUsed) {
    emitEntryFunctionScratchSetup(ST, MF, MBB, MFI, I,
        PreloadedPrivateBufferReg, ScratchRsrcReg);
  }

  if (HasFP) {
    DebugLoc DL;
    const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
    int64_t StackSize = FrameInfo.getStackSize();

    // On kernel entry, the private scratch wave offset is the SP value.
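    // Illustrative arithmetic (assuming a 64-lane wavefront): 16 bytes of
    // per-lane stack gives StackSize == 16, so SP is initialized below to
    // the wave offset plus 16 * 64 = 1024 bytes.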
    if (StackSize == 0) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg());
    } else {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg())
        .addImm(StackSize * ST.getWavefrontSize());
    }
  }
}

// Emit scratch setup code for AMDPAL or Mesa, assuming ResourceRegUsed is set.
void SIFrameLowering::emitEntryFunctionScratchSetup(const GCNSubtarget &ST,
      MachineFunction &MF, MachineBasicBlock &MBB, SIMachineFunctionInfo *MFI,
      MachineBasicBlock::iterator I, unsigned PreloadedPrivateBufferReg,
      unsigned ScratchRsrcReg) const {

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const Function &Fn = MF.getFunction();
  DebugLoc DL;

  if (ST.isAmdPalOS()) {
    // The pointer to the GIT is formed from the offset passed in and either
    // the amdgpu-git-ptr-high function attribute or the top part of the PC.
    unsigned RsrcLo = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
    unsigned RsrcHi = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
    unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    if (MFI->getGITPtrHigh() != 0xffffffff) {
      BuildMI(MBB, I, DL, SMovB32, RsrcHi)
        .addImm(MFI->getGITPtrHigh())
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    } else {
      const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64);
      BuildMI(MBB, I, DL, GetPC64, Rsrc01);
    }
    auto GitPtrLo = AMDGPU::SGPR0; // Low GIT address passed in
    if (ST.hasMergedShaders()) {
      switch (MF.getFunction().getCallingConv()) {
        case CallingConv::AMDGPU_HS:
        case CallingConv::AMDGPU_GS:
          // Low GIT address is passed in s8 rather than s0 for an LS+HS or
          // ES+GS merged shader on gfx9+.
          GitPtrLo = AMDGPU::SGPR8;
          break;
        default:
          break;
      }
    }
    MF.getRegInfo().addLiveIn(GitPtrLo);
    MBB.addLiveIn(GitPtrLo);
    BuildMI(MBB, I, DL, SMovB32, RsrcLo)
      .addReg(GitPtrLo)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    PointerType *PtrTy =
      PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
                       AMDGPUAS::CONSTANT_ADDRESS);
    MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
    const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
    auto MMO = MF.getMachineMemOperand(PtrInfo,
                                       MachineMemOperand::MOLoad |
                                       MachineMemOperand::MOInvariant |
                                       MachineMemOperand::MODereferenceable,
                                       16, 4);
    unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
    unsigned EncodedOffset = AMDGPU::getSMRDEncodedOffset(Subtarget, Offset);
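    // Note on the encoding (an assumption about SMRD immediate offsets, not
    // something this file asserts): some subtargets encode the immediate in
    // dwords rather than bytes, so a byte offset of 16 may encode as 4;
    // getSMRDEncodedOffset hides that per-subtarget choice.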
    BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg)
      .addReg(Rsrc01)
      .addImm(EncodedOffset) // offset
      .addImm(0) // glc
      .addImm(0) // dlc
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine)
      .addMemOperand(MMO);
    return;
  }
  if (ST.isMesaGfxShader(Fn) ||
      PreloadedPrivateBufferReg == AMDGPU::NoRegister) {
    assert(!ST.isAmdHsaOrMesa(Fn));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and set up the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();
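    // Rough sketch of the layout (an assumption about the buffer descriptor
    // format, not stated in this file): Rsrc23 packs dwords 2-3 of the V#,
    // roughly NUM_RECORDS in dword 2 and the stride/format/enable bits in
    // dword 3.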

    if (MFI->hasImplicitBufferPtr()) {
      unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        PointerType *PtrTy =
          PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
                           AMDGPUAS::CONSTANT_ADDRESS);
        MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
        auto MMO = MF.getMachineMemOperand(PtrInfo,
                                           MachineMemOperand::MOLoad |
                                           MachineMemOperand::MOInvariant |
                                           MachineMemOperand::MODereferenceable,
                                           8, 4);
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addImm(0) // offset
          .addImm(0) // glc
          .addImm(0) // dlc
          .addMemOperand(MMO)
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

        MF.getRegInfo().addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
        MBB.addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
      }
    } else {
      unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
        .addExternalSymbol("SCRATCH_RSRC_DWORD0")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
        .addExternalSymbol("SCRATCH_RSRC_DWORD1")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
      .addImm(Rsrc23 & 0xffffffff)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
      .addImm(Rsrc23 >> 32)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  }
}

// Find a scratch register that we can use at the start of the prologue to
// re-align the stack pointer. We avoid using callee-save registers since they
// may appear to be free when this is called from canUseAsPrologue (during
// shrink wrapping), but then no longer be free when this is called from
// emitPrologue.
//
// FIXME: This is a bit conservative, since in the above case we could use one
// of the callee-save registers as a scratch temp to re-align the stack
// pointer, but we would then have to make sure that we were in fact saving at
// least one callee-save register in the prologue, which is additional
// complexity that doesn't seem worth the benefit.
static unsigned findScratchNonCalleeSaveRegister(MachineFunction &MF,
                                                 LivePhysRegs &LiveRegs,
                                                 const TargetRegisterClass &RC) {
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo &TRI = *Subtarget.getRegisterInfo();

  // Mark callee saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(&MF);
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned Reg : RC) {
    if (LiveRegs.available(MRI, Reg))
      return Reg;
  }

  return AMDGPU::NoRegister;
}

void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction()) {
    emitEntryFunctionPrologue(MF, MBB);
    return;
  }

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  unsigned FramePtrReg = FuncInfo->getFrameOffsetReg();
  LivePhysRegs LiveRegs;

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL;

  bool HasFP = false;
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = NumBytes;

  if (TRI.needsStackRealignment(MF)) {
    HasFP = true;
    const unsigned Alignment = MFI.getMaxAlignment();

    RoundedSize += Alignment;

    LiveRegs.init(TRI);
    LiveRegs.addLiveIns(MBB);

    unsigned ScratchSPReg
      = findScratchNonCalleeSaveRegister(MF, LiveRegs,
                                         AMDGPU::SReg_32_XM0RegClass);
    assert(ScratchSPReg != AMDGPU::NoRegister);

    // Align the frame pointer. Note that the add computes into a temporary
    // and the AND defines FramePtrReg, not the SP:
    //   s_add_u32 tmp_reg, s32, (Alignment - 1) * WavefrontSize
    //   s_and_b32 fp_reg, tmp_reg, -(Alignment * WavefrontSize)
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), ScratchSPReg)
      .addReg(StackPtrReg)
      .addImm((Alignment - 1) * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_AND_B32), FramePtrReg)
      .addReg(ScratchSPReg, RegState::Kill)
      .addImm(-Alignment * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
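    // Worked example (illustrative numbers): with a 16-byte max alignment
    // and a 64-lane wave, this adds 15 * 64 = 960 and masks with
    // -(16 * 64) = -1024, rounding the wave-scaled SP up to a multiple of
    // 1024 bytes, i.e. 16 bytes per lane.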
    FuncInfo->setIsStackRealigned(true);
  } else if ((HasFP = hasFP(MF))) {
    // If we need a frame pointer, set it up here. It's whatever the value of
    // the stack pointer is at this point. Any variable size objects will be
    // allocated after this, so we can still use the frame pointer to reference
    // locals.
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
      .addReg(StackPtrReg)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  if (HasFP && RoundedSize != 0) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(RoundedSize * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
  }

  // To avoid clobbering VGPRs in lanes that weren't active on function entry,
  // turn on all lanes before doing the spill to memory.
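  // The emitted pattern is roughly (a sketch; register numbers are
  // placeholders):
  //   s_or_saveexec_b64 s[N:N+1], -1   ; save exec, enable all lanes
  //   buffer_store_dword vK, ...       ; one store per spilled CSR VGPR
  //   s_mov_b64 exec, s[N:N+1]         ; restore the entry exec mask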
  unsigned ScratchExecCopy = AMDGPU::NoRegister;

  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;

    if (ScratchExecCopy == AMDGPU::NoRegister) {
      if (LiveRegs.empty()) {
        LiveRegs.init(TRI);
        LiveRegs.addLiveIns(MBB);
      }

      ScratchExecCopy
        = findScratchNonCalleeSaveRegister(MF, LiveRegs,
                                           AMDGPU::SReg_64_XEXECRegClass);

      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64),
              ScratchExecCopy)
        .addImm(-1);
    }

    TII->storeRegToStackSlot(MBB, MBBI, Reg.VGPR, true,
                             Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
                             &TII->getRegisterInfo());
  }

  if (ScratchExecCopy != AMDGPU::NoRegister) {
    // FIXME: Split block and make terminator.
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
      .addReg(ScratchExecCopy);
  }
}

void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  DebugLoc DL;

  unsigned ScratchExecCopy = AMDGPU::NoRegister;
  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;

    if (ScratchExecCopy == AMDGPU::NoRegister) {
      // See emitPrologue.
      LivePhysRegs LiveRegs(*ST.getRegisterInfo());
      LiveRegs.addLiveIns(MBB);

      ScratchExecCopy
        = findScratchNonCalleeSaveRegister(MF, LiveRegs,
                                           AMDGPU::SReg_64_XEXECRegClass);

      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64),
              ScratchExecCopy)
        .addImm(-1);
    }

    TII->loadRegFromStackSlot(MBB, MBBI, Reg.VGPR,
                              Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
                              &TII->getRegisterInfo());
  }

  if (ScratchExecCopy != AMDGPU::NoRegister) {
    // FIXME: Split block and make terminator.
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
      .addReg(ScratchExecCopy);
  }

  if (hasFP(MF)) {
    const MachineFrameInfo &MFI = MF.getFrameInfo();
    uint32_t NumBytes = MFI.getStackSize();
    uint32_t RoundedSize = FuncInfo->isStackRealigned() ?
      NumBytes + MFI.getMaxAlignment() : NumBytes;

    const unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_SUB_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(RoundedSize * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameDestroy);
  }
}

static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I))
      return false;
  }

  return true;
}

int SIFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            unsigned &FrameReg) const {
  const SIRegisterInfo *RI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();

  FrameReg = RI->getFrameRegister(MF);
  return MF.getFrameInfo().getObjectOffset(FI);
}

void SIFrameLowering::processFunctionBeforeFrameFinalized(
  MachineFunction &MF,
  RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasStackObjects())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  bool AllSGPRSpilledToVGPRs = false;

  if (TRI.spillSGPRToVGPR() && FuncInfo->hasSpilledSGPRs()) {
    AllSGPRSpilledToVGPRs = true;

    // Process all SGPR spills before frame offsets are finalized. Ideally
    // SGPRs are spilled to VGPRs, in which case we can eliminate the stack
    // usage.
    //
    // XXX - This operates under the assumption that only other SGPR spills are
    // users of the frame index. I'm not 100% sure this is correct. The
    // StackColoring pass has a comment saying a future improvement would be
    // merging allocas with spill slots, but for now, according to
    // MachineFrameInfo, an isSpillSlot object can't alias any other object.
    for (MachineBasicBlock &MBB : MF) {
      MachineBasicBlock::iterator Next;
      for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
        MachineInstr &MI = *I;
        Next = std::next(I);

        if (TII->isSGPRSpill(MI)) {
          int FI = TII->getNamedOperand(MI, AMDGPU::OpName::addr)->getIndex();
          assert(MFI.getStackID(FI) == SIStackID::SGPR_SPILL);
          if (FuncInfo->allocateSGPRSpillToVGPR(MF, FI)) {
            bool Spilled = TRI.eliminateSGPRToVGPRSpillFrameIndex(MI, FI, RS);
            (void)Spilled;
            assert(Spilled && "failed to spill SGPR to VGPR when allocated");
          } else
            AllSGPRSpilledToVGPRs = false;
        }
      }
    }
  }

  FuncInfo->removeSGPRToVGPRFrameIndices(MFI);

  // FIXME: The other checks should be redundant with allStackObjectsAreDead,
  // but currently hasNonSpillStackObjects is set only from source
  // allocas. Stack temps produced from legalization are not counted currently.
  if (FuncInfo->hasNonSpillStackObjects() || FuncInfo->hasSpilledVGPRs() ||
      !AllSGPRSpilledToVGPRs || !allStackObjectsAreDead(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    if (FuncInfo->isEntryFunction()) {
      int ScavengeFI = MFI.CreateFixedObject(
        TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
      RS->addScavengingFrameIndex(ScavengeFI);
    } else {
      int ScavengeFI = MFI.CreateStackObject(
        TRI.getSpillSize(AMDGPU::SGPR_32RegClass),
        TRI.getSpillAlignment(AMDGPU::SGPR_32RegClass),
        false);
      RS->addScavengingFrameIndex(ScavengeFI);
    }
  }
}

void SIFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                           BitVector &SavedRegs,
                                           RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // The SP is specifically managed and we don't want extra spills of it.
  SavedRegs.reset(MFI->getStackPtrOffsetReg());
}

MachineBasicBlock::iterator SIFrameLowering::eliminateCallFramePseudoInstr(
  MachineFunction &MF,
  MachineBasicBlock &MBB,
  MachineBasicBlock::iterator I) const {
  int64_t Amount = I->getOperand(0).getImm();
  if (Amount == 0)
    return MBB.erase(I);

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

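  // As elsewhere in this file, the SP is a per-wave byte offset, so the
  // per-lane Amount is scaled by the wavefront size below (illustrative
  // numbers: a 16-byte call frame on a 64-lane wave adjusts SP by 1024).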
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  if (!TFI->hasReservedCallFrame(MF)) {
    unsigned Align = getStackAlignment();

    Amount = alignTo(Amount, Align);
    assert(isUInt<32>(Amount) && "exceeded stack address space size");
    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    unsigned SPReg = MFI->getStackPtrOffsetReg();

    unsigned Op = IsDestroy ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
    BuildMI(MBB, I, DL, TII->get(Op), SPReg)
      .addReg(SPReg)
      .addImm(Amount * ST.getWavefrontSize());
  } else if (CalleePopAmount != 0) {
    llvm_unreachable("is this used?");
  }

  return MBB.erase(I);
}

bool SIFrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MFI.hasCalls()) {
    // All offsets are unsigned, so need to be addressed in the same direction
    // as stack growth.
    if (MFI.getStackSize() != 0)
      return true;

    // For the entry point, the input wave scratch offset must be copied to the
    // API SP if there are calls.
    if (MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction())
      return true;

    // Retain behavior of always omitting the FP for leaf functions when
    // possible.
    if (MF.getTarget().Options.DisableFramePointerElim(MF))
      return true;
  }

  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
         MFI.hasStackMap() || MFI.hasPatchPoint() ||
         MF.getSubtarget<GCNSubtarget>().getRegisterInfo()
           ->needsStackRealignment(MF);
}