//===----------------------- SIFrameLowering.cpp -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"

#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"

using namespace llvm;

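// All 128-bit SGPR tuples potentially usable by this function, based on the
// subtarget's maximum SGPR count.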
static ArrayRef<MCPhysReg> getAllSGPR128(const SISubtarget &ST,
                                         const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
                      ST.getMaxNumSGPRs(MF) / 4);
}

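// All individual 32-bit SGPRs potentially usable by this function.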
static ArrayRef<MCPhysReg> getAllSGPRs(const SISubtarget &ST,
                                       const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(),
                      ST.getMaxNumSGPRs(MF));
}

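// Initialize the flat_scratch register pair at function entry so flat
// instructions can access scratch memory. Depending on the subtarget,
// flat_scratch holds either a 64-bit scratch base pointer or a {size, offset}
// pair; both forms are derived from the preloaded FLAT_SCRATCH_INIT value and
// the scratch wave offset.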
void SIFrameLowering::emitFlatScratchInit(const SISubtarget &ST,
                                          MachineFunction &MF,
                                          MachineBasicBlock &MBB) const {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();

  // We don't need this if we only have spills, since there is no user-facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  unsigned FlatScratchInitReg
    = TRI->getPreloadedValue(MF, SIRegisterInfo::FLAT_SCRATCH_INIT);

  MachineRegisterInfo &MRI = MF.getRegInfo();
  MRI.addLiveIn(FlatScratchInitReg);
  MBB.addLiveIn(FlatScratchInitReg);

  unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
  unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
      .addReg(FlatScrInitHi)
      .addImm(0);

    return;
  }

  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
    .addReg(FlatScrInitHi, RegState::Kill);

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
    .addReg(FlatScrInitLo)
    .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
    .addReg(FlatScrInitLo, RegState::Kill)
    .addImm(8);
}

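// Select the SGPR tuple that will hold the scratch resource descriptor. If it
// was assigned the reserved registers at the end of the SGPR file, try to
// shift it down to just past the SGPRs that are actually preloaded.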
unsigned SIFrameLowering::getReservedPrivateSegmentBufferReg(
  const SISubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {

  // We need to insert initialization of the scratch resource descriptor.
  unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
  if (ScratchRsrcReg == AMDGPU::NoRegister)
    return AMDGPU::NoRegister;

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift it down to the end of those
  // which were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. The only
  // ones we cannot eliminate are those required for scratch access. For now we
  // skip over user SGPRs and may leave unused holes.

  // We find the resource first because it has an alignment requirement.

  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = getAllSGPR128(ST, MF);
  AllSGPR128s = AllSGPR128s.slice(
    std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

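// Select the SGPR that will hold the scratch wave byte offset, similarly
// shifting it down from the reserved register at the end of the SGPR file
// when possible. The registers that must be skipped are itemized below.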
unsigned SIFrameLowering::getReservedPrivateSegmentWaveByteOffsetReg(
  const SISubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();
  if (ST.hasSGPRInitBug() ||
      ScratchWaveOffsetReg != TRI->reservedPrivateSegmentWaveByteOffsetReg(MF))
    return ScratchWaveOffsetReg;

  unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();

  ArrayRef<MCPhysReg> AllSGPRs = getAllSGPRs(ST, MF);
  if (NumPreloaded > AllSGPRs.size())
    return ScratchWaveOffsetReg;

  AllSGPRs = AllSGPRs.slice(NumPreloaded);

  // We need to drop registers from the end of the list that we cannot use for
  // the scratch wave offset.
  // + 2 because s102 and s103 do not exist on VI
  // + 2 for vcc
  // + 2 for xnack_mask
  // + 2 for flat_scratch
  // + 4 for the registers reserved for the scratch resource register
  // + 1 for the register reserved for the scratch wave offset. (By excluding
  //     this register from the list to consider, when it is being used for
  //     the scratch wave offset and there are no other free SGPRs, the value
  //     will simply stay in this register.)
  // ----
  //  13
  if (AllSGPRs.size() < 13)
    return ScratchWaveOffsetReg;

  for (MCPhysReg Reg : AllSGPRs.drop_back(13)) {
    // Pick the first unallocated SGPR. Be careful not to pick an alias of the
    // scratch descriptor, since we haven't added its uses yet.
    if (!MRI.isPhysRegUsed(Reg)) {
      if (!MRI.isAllocatable(Reg) ||
          TRI->isSubRegisterEq(ScratchRsrcReg, Reg))
        continue;

      MRI.replaceRegWith(ScratchWaveOffsetReg, Reg);
      MFI->setScratchWaveOffsetReg(Reg);
      return Reg;
    }
  }

  return ScratchWaveOffsetReg;
}

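// Emit the entry-block setup every function needs before it can use scratch:
// select the scratch resource descriptor and scratch wave offset registers,
// copy in their preloaded values, mark them live throughout the function, and
// initialize flat_scratch when flat instructions may access scratch.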
void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  // Emit debugger prologue if "amdgpu-debugger-emit-prologue" attribute was
  // specified.
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  auto AMDGPUASI = ST.getAMDGPUAS();
  if (ST.debuggerEmitPrologue())
    emitDebuggerPrologue(MF, MBB);

  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // If we only have SGPR spills, we won't actually be using scratch memory
  // since these spill to VGPRs.
  //
  // FIXME: We should be cleaning up these unused SGPR spill frame indices
  // somewhere.

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned ScratchRsrcReg
    = getReservedPrivateSegmentBufferReg(ST, TII, TRI, MFI, MF);
  unsigned ScratchWaveOffsetReg
    = getReservedPrivateSegmentWaveByteOffsetReg(ST, TII, TRI, MFI, MF);

  if (ScratchRsrcReg == AMDGPU::NoRegister) {
    assert(ScratchWaveOffsetReg == AMDGPU::NoRegister);
    return;
  }

  assert(!TRI->isSubRegister(ScratchRsrcReg, ScratchWaveOffsetReg));

  // We need to do the replacement of the private segment buffer and wave offset
  // register even if there are no stack objects. There could be stores to undef
  // or a constant without an associated object.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.

  if (MF.getFrameInfo().hasStackObjects() && MFI->hasFlatScratchInit())
    emitFlatScratchInit(ST, MF, MBB);

  // We need to insert initialization of the scratch resource descriptor.
  unsigned PreloadedScratchWaveOffsetReg = TRI->getPreloadedValue(
    MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);

  unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister;
  if (ST.isAmdCodeObjectV2(MF) || ST.isMesaGfxShader(MF)) {
    PreloadedPrivateBufferReg = TRI->getPreloadedValue(
      MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
  }

  bool OffsetRegUsed = !MRI.use_empty(ScratchWaveOffsetReg);
  bool ResourceRegUsed = !MRI.use_empty(ScratchRsrcReg);

  // We added live-ins during argument lowering, but since they were not used
  // they were deleted. We're adding the uses now, so add them back.
  if (OffsetRegUsed) {
    assert(PreloadedScratchWaveOffsetReg != AMDGPU::NoRegister &&
           "scratch wave offset input is required");
    MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
    MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
  }

  if (ResourceRegUsed && PreloadedPrivateBufferReg != AMDGPU::NoRegister) {
    assert(ST.isAmdCodeObjectV2(MF) || ST.isMesaGfxShader(MF));
    MRI.addLiveIn(PreloadedPrivateBufferReg);
    MBB.addLiveIn(PreloadedPrivateBufferReg);
  }

  // Make the selected registers live throughout the function.
  for (MachineBasicBlock &OtherBB : MF) {
    if (&OtherBB == &MBB)
      continue;

    if (OffsetRegUsed)
      OtherBB.addLiveIn(ScratchWaveOffsetReg);

    if (ResourceRegUsed)
      OtherBB.addLiveIn(ScratchRsrcReg);
  }

  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // If we reserved the original input registers, we don't need to copy to the
  // reserved registers.

  bool CopyBuffer = ResourceRegUsed &&
    PreloadedPrivateBufferReg != AMDGPU::NoRegister &&
    ST.isAmdCodeObjectV2(MF) &&
    ScratchRsrcReg != PreloadedPrivateBufferReg;

  // This needs to be careful of the copying order to avoid overwriting one of
  // the input registers before it's been copied to its final destination.
  // Usually the offset should be copied first.
  bool CopyBufferFirst = TRI->isSubRegisterEq(PreloadedPrivateBufferReg,
                                              ScratchWaveOffsetReg);
  if (CopyBuffer && CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (OffsetRegUsed &&
      PreloadedScratchWaveOffsetReg != ScratchWaveOffsetReg) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
      .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill);
  }

  if (CopyBuffer && !CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (ResourceRegUsed && (ST.isMesaGfxShader(MF) ||
      (PreloadedPrivateBufferReg == AMDGPU::NoRegister))) {
    assert(!ST.isAmdCodeObjectV2(MF));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and set up the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();

    if (MFI->hasPrivateMemoryInputPtr()) {
      unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
          .addReg(PreloadedPrivateBufferReg)
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        PointerType *PtrTy =
          PointerType::get(Type::getInt64Ty(MF.getFunction()->getContext()),
                           AMDGPUASI.CONSTANT_ADDRESS);
        MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
        auto MMO = MF.getMachineMemOperand(PtrInfo,
                                           MachineMemOperand::MOLoad |
                                           MachineMemOperand::MOInvariant |
                                           MachineMemOperand::MODereferenceable,
                                           0, 0);
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
          .addReg(PreloadedPrivateBufferReg)
          .addImm(0) // offset
          .addImm(0) // glc
          .addMemOperand(MMO)
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      }
    } else {
      unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
        .addExternalSymbol("SCRATCH_RSRC_DWORD0")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
        .addExternalSymbol("SCRATCH_RSRC_DWORD1")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
      .addImm(Rsrc23 & 0xffffffff)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
      .addImm(Rsrc23 >> 32)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  }
}

void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
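  // There is currently nothing to do here: the prologue only initializes
  // reserved registers that remain live for the entire function, so no frame
  // state needs to be restored.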
}

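// Return true if every stack object in the function has been marked dead.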
static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I))
      return false;
  }

  return true;
}

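// All frame objects are addressed relative to the frame register reported by
// SIRegisterInfo::getFrameRegister; the object's offset within the frame is
// returned unchanged.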
int SIFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            unsigned &FrameReg) const {
  const SIRegisterInfo *RI = MF.getSubtarget<SISubtarget>().getRegisterInfo();

  FrameReg = RI->getFrameRegister(MF);
  return MF.getFrameInfo().getObjectOffset(FI);
}

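// Lower SGPR spills to VGPR lanes while frame offsets are still being
// computed, and reserve an emergency scavenging slot at offset 0 if any real
// stack objects remain.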
void SIFrameLowering::processFunctionBeforeFrameFinalized(
  MachineFunction &MF,
  RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasStackObjects())
    return;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  bool AllSGPRSpilledToVGPRs = false;

  if (TRI.spillSGPRToVGPR() && FuncInfo->hasSpilledSGPRs()) {
    AllSGPRSpilledToVGPRs = true;

    // Process all SGPR spills before frame offsets are finalized. Ideally SGPRs
    // are spilled to VGPRs, in which case we can eliminate the stack usage.
    //
    // XXX - This operates under the assumption that only other SGPR spills are
    // users of the frame index. I'm not 100% sure this is correct. The
    // StackColoring pass has a comment saying a future improvement would be
    // merging allocas with spill slots, but for now, according to
    // MachineFrameInfo, isSpillSlot objects can't alias any other object.
    for (MachineBasicBlock &MBB : MF) {
      MachineBasicBlock::iterator Next;
      for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
        MachineInstr &MI = *I;
        Next = std::next(I);

        if (TII->isSGPRSpill(MI)) {
          int FI = TII->getNamedOperand(MI, AMDGPU::OpName::addr)->getIndex();
          if (FuncInfo->allocateSGPRSpillToVGPR(MF, FI)) {
            bool Spilled = TRI.eliminateSGPRToVGPRSpillFrameIndex(MI, FI, RS);
            (void)Spilled;
            assert(Spilled && "failed to spill SGPR to VGPR when allocated");
          } else
            AllSGPRSpilledToVGPRs = false;
        }
      }
    }

    FuncInfo->removeSGPRToVGPRFrameIndices(MFI);
  }

  // FIXME: The other checks should be redundant with allStackObjectsAreDead,
  // but currently hasNonSpillStackObjects is set only from source
  // allocas. Stack temps produced from legalization are not counted currently.
  if (FuncInfo->hasNonSpillStackObjects() || FuncInfo->hasSpilledVGPRs() ||
      !AllSGPRSpilledToVGPRs || !allStackObjectsAreDead(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    // We force this to be at offset 0 so no user object ever has 0 as an
    // address, so we may use 0 as an invalid pointer value. This is because
    // LLVM assumes 0 is an invalid pointer in address space 0. Because alloca
    // is required to be address space 0, we are forced to accept this for
    // now. Ideally we could have the stack in another address space with 0 as a
    // valid pointer, and -1 as the null value.
    //
    // This will also waste additional space when user stack objects require > 4
    // byte alignment.
    //
    // The main cost here is losing the offset for addressing modes. However
    // this also ensures we shouldn't need a register for the offset when
    // emergency scavenging.
    int ScavengeFI = MFI.CreateFixedObject(
      AMDGPU::SGPR_32RegClass.getSize(), 0, false);
    RS->addScavengingFrameIndex(ScavengeFI);
  }
}

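// Spill the work group ID SGPRs and work item ID VGPRs for each of the three
// dimensions to their dedicated stack objects so the debugger can locate them.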
void SIFrameLowering::emitDebuggerPrologue(MachineFunction &MF,
                                           MachineBasicBlock &MBB) const {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  MachineBasicBlock::iterator I = MBB.begin();
  DebugLoc DL;

  // For each dimension:
  for (unsigned i = 0; i < 3; ++i) {
    // Get work group ID SGPR, and make it live-in again.
    unsigned WorkGroupIDSGPR = MFI->getWorkGroupIDSGPR(i);
    MF.getRegInfo().addLiveIn(WorkGroupIDSGPR);
    MBB.addLiveIn(WorkGroupIDSGPR);

    // Since SGPRs are spilled into VGPRs, copy work group ID SGPR to VGPR in
    // order to spill it to scratch.
    unsigned WorkGroupIDVGPR =
      MF.getRegInfo().createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), WorkGroupIDVGPR)
      .addReg(WorkGroupIDSGPR);

    // Spill work group ID.
    int WorkGroupIDObjectIdx = MFI->getDebuggerWorkGroupIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkGroupIDVGPR, false,
      WorkGroupIDObjectIdx, &AMDGPU::VGPR_32RegClass, TRI);

    // Get work item ID VGPR, and make it live-in again.
    unsigned WorkItemIDVGPR = MFI->getWorkItemIDVGPR(i);
    MF.getRegInfo().addLiveIn(WorkItemIDVGPR);
    MBB.addLiveIn(WorkItemIDVGPR);

    // Spill work item ID.
    int WorkItemIDObjectIdx = MFI->getDebuggerWorkItemIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkItemIDVGPR, false,
      WorkItemIDObjectIdx, &AMDGPU::VGPR_32RegClass, TRI);
  }
}