//===-- SIMachineFunctionInfo.cpp -------- SI Machine Function Info -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "SIMachineFunctionInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"

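// A wavefront on SI hardware is 64 lanes wide, so a single VGPR provides 64
// per-lane 32-bit slots that spilled SGPR dwords can be written into.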
#define MAX_LANES 64

using namespace llvm;

static cl::opt<bool> EnableSpillSGPRToVGPR(
  "amdgpu-spill-sgpr-to-vgpr",
  cl::desc("Enable spilling SGPRs to VGPRs"),
  cl::ReallyHidden,
  cl::init(true));

SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
  : AMDGPUMachineFunction(MF),
    TIDReg(AMDGPU::NoRegister),
    ScratchRSrcReg(AMDGPU::NoRegister),
    ScratchWaveOffsetReg(AMDGPU::NoRegister),
    PrivateSegmentBufferUserSGPR(AMDGPU::NoRegister),
    DispatchPtrUserSGPR(AMDGPU::NoRegister),
    QueuePtrUserSGPR(AMDGPU::NoRegister),
    KernargSegmentPtrUserSGPR(AMDGPU::NoRegister),
    DispatchIDUserSGPR(AMDGPU::NoRegister),
    FlatScratchInitUserSGPR(AMDGPU::NoRegister),
    PrivateSegmentSizeUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountXUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountYUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountZUserSGPR(AMDGPU::NoRegister),
    WorkGroupIDXSystemSGPR(AMDGPU::NoRegister),
    WorkGroupIDYSystemSGPR(AMDGPU::NoRegister),
    WorkGroupIDZSystemSGPR(AMDGPU::NoRegister),
    WorkGroupInfoSystemSGPR(AMDGPU::NoRegister),
    PrivateSegmentWaveByteOffsetSystemSGPR(AMDGPU::NoRegister),
    PSInputAddr(0),
    ReturnsVoid(true),
    MaximumWorkGroupSize(0),
    DebuggerReservedVGPRCount(0),
    DebuggerWorkGroupIDStackObjectIndices({{0, 0, 0}}),
    DebuggerWorkItemIDStackObjectIndices({{0, 0, 0}}),
    LDSWaveSpillSize(0),
    PSInputEna(0),
    NumUserSGPRs(0),
    NumSystemSGPRs(0),
    HasSpilledSGPRs(false),
    HasSpilledVGPRs(false),
    HasNonSpillStackObjects(false),
    HasFlatInstructions(false),
    NumSpilledSGPRs(0),
    NumSpilledVGPRs(0),
    PrivateSegmentBuffer(false),
    DispatchPtr(false),
    QueuePtr(false),
    KernargSegmentPtr(false),
    DispatchID(false),
    FlatScratchInit(false),
    GridWorkgroupCountX(false),
    GridWorkgroupCountY(false),
    GridWorkgroupCountZ(false),
    WorkGroupIDX(false),
    WorkGroupIDY(false),
    WorkGroupIDZ(false),
    WorkGroupInfo(false),
    PrivateSegmentWaveByteOffset(false),
    WorkItemIDX(false),
    WorkItemIDY(false),
    WorkItemIDZ(false) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const Function *F = MF.getFunction();

  PSInputAddr = AMDGPU::getInitialPSInputAddr(*F);

  const MachineFrameInfo *FrameInfo = MF.getFrameInfo();

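  // Anything that is not a graphics shader is a compute kernel, which always
  // receives the kernarg segment pointer and the X dimension workgroup and
  // workitem IDs.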
  if (!AMDGPU::isShader(F->getCallingConv())) {
    KernargSegmentPtr = true;
    WorkGroupIDX = true;
    WorkItemIDX = true;
  }

  if (F->hasFnAttribute("amdgpu-work-group-id-y") || ST.debuggerEmitPrologue())
    WorkGroupIDY = true;

  if (F->hasFnAttribute("amdgpu-work-group-id-z") || ST.debuggerEmitPrologue())
    WorkGroupIDZ = true;

  if (F->hasFnAttribute("amdgpu-work-item-id-y") || ST.debuggerEmitPrologue())
    WorkItemIDY = true;

  if (F->hasFnAttribute("amdgpu-work-item-id-z") || ST.debuggerEmitPrologue())
    WorkItemIDZ = true;

  // X, XY, and XYZ are the only supported combinations, so make sure Y is
  // enabled if Z is.
  if (WorkItemIDZ)
    WorkItemIDY = true;

  bool MaySpill = ST.isVGPRSpillingEnabled(*F);
  bool HasStackObjects = FrameInfo->hasStackObjects();

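  // Scratch accesses (stack objects or register spills) are addressed relative
  // to the per-wave scratch offset, so it is needed whenever either may occur.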
  if (HasStackObjects || MaySpill)
    PrivateSegmentWaveByteOffset = true;

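  // Under amdhsa, the user SGPR inputs are enabled on demand: the private
  // segment buffer whenever scratch may be used, and the ABI pointers only
  // when the corresponding function attribute is present.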
  if (ST.isAmdHsaOS()) {
    if (HasStackObjects || MaySpill)
      PrivateSegmentBuffer = true;

    if (F->hasFnAttribute("amdgpu-dispatch-ptr"))
      DispatchPtr = true;

    if (F->hasFnAttribute("amdgpu-queue-ptr"))
      QueuePtr = true;

    if (F->hasFnAttribute("amdgpu-dispatch-id"))
      DispatchID = true;
  }

  // Spills are accessed with scratch buffer instructions rather than flat
  // instructions, so spilling alone does not require the flat scratch init
  // registers.
  // TODO: On VI where we must use flat for global, we should be able to omit
  // this if it is never used for generic access.
  if (HasStackObjects && ST.getGeneration() >= SISubtarget::SEA_ISLANDS &&
      ST.isAmdHsaOS())
    FlatScratchInit = true;

  if (AMDGPU::isCompute(F->getCallingConv()))
    MaximumWorkGroupSize = AMDGPU::getMaximumWorkGroupSize(*F);
  else
    MaximumWorkGroupSize = ST.getWavefrontSize();

  if (ST.debuggerReserveRegs())
    DebuggerReservedVGPRCount = 4;
}

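// The private segment buffer is a 128-bit resource descriptor and therefore
// occupies four consecutive user SGPRs; getMatchingSuperReg maps the next free
// SGPR onto the SReg_128 tuple starting at it.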
unsigned SIMachineFunctionInfo::addPrivateSegmentBuffer(
  const SIRegisterInfo &TRI) {
  PrivateSegmentBufferUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_128RegClass);
  NumUserSGPRs += 4;
  return PrivateSegmentBufferUserSGPR;
}

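// Each of the following preloaded inputs is a 64-bit value, so each occupies a
// pair of consecutive user SGPRs (an SReg_64).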
unsigned SIMachineFunctionInfo::addDispatchPtr(const SIRegisterInfo &TRI) {
  DispatchPtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return DispatchPtrUserSGPR;
}

unsigned SIMachineFunctionInfo::addQueuePtr(const SIRegisterInfo &TRI) {
  QueuePtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return QueuePtrUserSGPR;
}

unsigned SIMachineFunctionInfo::addKernargSegmentPtr(
  const SIRegisterInfo &TRI) {
  KernargSegmentPtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return KernargSegmentPtrUserSGPR;
}

unsigned SIMachineFunctionInfo::addDispatchID(const SIRegisterInfo &TRI) {
  DispatchIDUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return DispatchIDUserSGPR;
}

unsigned SIMachineFunctionInfo::addFlatScratchInit(const SIRegisterInfo &TRI) {
  FlatScratchInitUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return FlatScratchInitUserSGPR;
}

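// Map one spilled SGPR dword (a frame index plus a dword subregister index) to
// a lane of a VGPR reserved for SGPR spilling. Each reserved VGPR provides
// MAX_LANES dword slots, written with v_writelane and read back with
// v_readlane.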
SIMachineFunctionInfo::SpilledReg
SIMachineFunctionInfo::getSpilledReg(MachineFunction *MF, unsigned FrameIndex,
                                     unsigned SubIdx) {
  if (!EnableSpillSGPRToVGPR)
    return SpilledReg();

  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  int64_t Offset = FrameInfo->getObjectOffset(FrameIndex);
  Offset += SubIdx * 4;

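  // View scratch as an array of dword slots: dword Offset / 4 lives in lane
  // (Offset / 4) % MAX_LANES of lane VGPR number (Offset / 4) / MAX_LANES.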
  unsigned LaneVGPRIdx = Offset / (MAX_LANES * 4);
  unsigned Lane = (Offset / 4) % MAX_LANES;

  SpilledReg Spill;
  Spill.Lane = Lane;

  if (!LaneVGPRs.count(LaneVGPRIdx)) {
    unsigned LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass);

    // We have no VGPRs left for spilling SGPRs.
    if (LaneVGPR == AMDGPU::NoRegister)
      return Spill;

    LaneVGPRs[LaneVGPRIdx] = LaneVGPR;

    // Add this register as live-in to all blocks to avoid machine verifier
    // complaints about the use of an undefined physical register.
    for (MachineBasicBlock &BB : *MF)
      BB.addLiveIn(LaneVGPR);
  }

  Spill.VGPR = LaneVGPRs[LaneVGPRIdx];
  return Spill;
}
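
// A minimal sketch of how a consumer (e.g. SGPR spill lowering) might use the
// result; SrcSGPR, MBB, MI, DL, and TII are assumed to come from the
// surrounding lowering code, not from this file:
//
//   SIMachineFunctionInfo::SpilledReg Spill =
//       MFI->getSpilledReg(MF, FrameIndex, SubIdx);
//   if (Spill.VGPR != AMDGPU::NoRegister)
//     BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill.VGPR)
//         .addReg(SrcSGPR)
//         .addImm(Spill.Lane);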

unsigned SIMachineFunctionInfo::getMaximumWorkGroupSize(
                                              const MachineFunction &MF) const {
  return MaximumWorkGroupSize;
}