//===-- SIMachineFunctionInfo.cpp - SI Machine Function Info ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//

#include "SIMachineFunctionInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

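// Wavefronts have at most 64 lanes; getSpilledReg() packs one 32-bit SGPR
// spill slot into each lane of a reserved VGPR.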
#define MAX_LANES 64

using namespace llvm;

// Pin the vtable to this file.
void SIMachineFunctionInfo::anchor() {}

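// Determine from the function's calling convention, attributes, and frame
// which implicit inputs (user/system SGPRs and workitem IDs) it requires.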
SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
  : AMDGPUMachineFunction(MF),
    TIDReg(AMDGPU::NoRegister),
    ScratchRSrcReg(AMDGPU::NoRegister),
    ScratchWaveOffsetReg(AMDGPU::NoRegister),
    PrivateSegmentBufferUserSGPR(AMDGPU::NoRegister),
    DispatchPtrUserSGPR(AMDGPU::NoRegister),
    QueuePtrUserSGPR(AMDGPU::NoRegister),
    KernargSegmentPtrUserSGPR(AMDGPU::NoRegister),
    DispatchIDUserSGPR(AMDGPU::NoRegister),
    FlatScratchInitUserSGPR(AMDGPU::NoRegister),
    PrivateSegmentSizeUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountXUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountYUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountZUserSGPR(AMDGPU::NoRegister),
    WorkGroupIDXSystemSGPR(AMDGPU::NoRegister),
    WorkGroupIDYSystemSGPR(AMDGPU::NoRegister),
    WorkGroupIDZSystemSGPR(AMDGPU::NoRegister),
    WorkGroupInfoSystemSGPR(AMDGPU::NoRegister),
    PrivateSegmentWaveByteOffsetSystemSGPR(AMDGPU::NoRegister),
    PSInputAddr(0),
    ReturnsVoid(true),
    MaximumWorkGroupSize(0),
    LDSWaveSpillSize(0),
    PSInputEna(0),
    NumUserSGPRs(0),
    NumSystemSGPRs(0),
    HasSpilledSGPRs(false),
    HasSpilledVGPRs(false),
    HasNonSpillStackObjects(false),
    HasFlatInstructions(false),
    PrivateSegmentBuffer(false),
    DispatchPtr(false),
    QueuePtr(false),
    DispatchID(false),
    KernargSegmentPtr(false),
    FlatScratchInit(false),
    GridWorkgroupCountX(false),
    GridWorkgroupCountY(false),
    GridWorkgroupCountZ(false),
    WorkGroupIDX(false),
    WorkGroupIDY(false),
    WorkGroupIDZ(false),
    WorkGroupInfo(false),
    PrivateSegmentWaveByteOffset(false),
    WorkItemIDX(false),
    WorkItemIDY(false),
    WorkItemIDZ(false) {
  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
  const Function *F = MF.getFunction();

  PSInputAddr = AMDGPU::getInitialPSInputAddr(*F);

  const MachineFrameInfo *FrameInfo = MF.getFrameInfo();

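  // Compute kernels (any non-shader calling convention) always need the
  // kernarg segment pointer and the X workgroup and workitem IDs.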
  if (!AMDGPU::isShader(F->getCallingConv())) {
    KernargSegmentPtr = true;
    WorkGroupIDX = true;
    WorkItemIDX = true;
  }

  if (F->hasFnAttribute("amdgpu-work-group-id-y"))
    WorkGroupIDY = true;

  if (F->hasFnAttribute("amdgpu-work-group-id-z"))
    WorkGroupIDZ = true;

  if (F->hasFnAttribute("amdgpu-work-item-id-y"))
    WorkItemIDY = true;

  if (F->hasFnAttribute("amdgpu-work-item-id-z"))
    WorkItemIDZ = true;

  // X, XY, and XYZ are the only supported combinations, so make sure Y is
  // enabled if Z is.
  if (WorkItemIDZ)
    WorkItemIDY = true;

  bool MaySpill = ST.isVGPRSpillingEnabled(*F);
  bool HasStackObjects = FrameInfo->hasStackObjects();

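  // Stack access or register spilling requires the per-wave scratch offset,
  // and on amdhsa also the private segment (scratch resource) buffer.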
  if (HasStackObjects || MaySpill)
    PrivateSegmentWaveByteOffset = true;

  if (ST.isAmdHsaOS()) {
    if (HasStackObjects || MaySpill)
      PrivateSegmentBuffer = true;

    if (F->hasFnAttribute("amdgpu-dispatch-ptr"))
      DispatchPtr = true;
  }

  // We don't need to worry about accessing spills with flat instructions.
  // TODO: On VI where we must use flat for global, we should be able to omit
  // this if it is never used for generic access.
  if (HasStackObjects && ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS &&
      ST.isAmdHsaOS())
    FlatScratchInit = true;

  if (AMDGPU::isCompute(F->getCallingConv()))
    MaximumWorkGroupSize = AMDGPU::getMaximumWorkGroupSize(*F);
  else
    MaximumWorkGroupSize = ST.getWavefrontSize();
}

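// Each add* helper below reserves the next free user SGPR (or SGPR tuple) for
// a preloaded kernel argument, records the chosen register, and updates
// NumUserSGPRs.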
unsigned SIMachineFunctionInfo::addPrivateSegmentBuffer(
  const SIRegisterInfo &TRI) {
  PrivateSegmentBufferUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_128RegClass);
  NumUserSGPRs += 4;
  return PrivateSegmentBufferUserSGPR;
}

unsigned SIMachineFunctionInfo::addDispatchPtr(const SIRegisterInfo &TRI) {
  DispatchPtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return DispatchPtrUserSGPR;
}

unsigned SIMachineFunctionInfo::addQueuePtr(const SIRegisterInfo &TRI) {
  QueuePtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return QueuePtrUserSGPR;
}

unsigned SIMachineFunctionInfo::addKernargSegmentPtr(const SIRegisterInfo &TRI) {
  KernargSegmentPtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return KernargSegmentPtrUserSGPR;
}

unsigned SIMachineFunctionInfo::addFlatScratchInit(const SIRegisterInfo &TRI) {
  FlatScratchInitUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return FlatScratchInitUserSGPR;
}

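// Map an SGPR spill slot (frame index plus 32-bit subregister index) to a
// lane of a reserved VGPR. Each VGPR holds 64 lanes of 4 bytes, so the byte
// offset picks both which lane VGPR to use and the lane within it.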
SIMachineFunctionInfo::SpilledReg SIMachineFunctionInfo::getSpilledReg(
                                                       MachineFunction *MF,
                                                       unsigned FrameIndex,
                                                       unsigned SubIdx) {
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(
      MF->getSubtarget<AMDGPUSubtarget>().getRegisterInfo());
  MachineRegisterInfo &MRI = MF->getRegInfo();
  int64_t Offset = FrameInfo->getObjectOffset(FrameIndex);
  Offset += SubIdx * 4;

  unsigned LaneVGPRIdx = Offset / (64 * 4);
  unsigned Lane = (Offset / 4) % 64;

  SpilledReg Spill;
  Spill.Lane = Lane;

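  // Reserve an unused VGPR for this group of 64 spill lanes the first time
  // one of its lanes is needed.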
  if (!LaneVGPRs.count(LaneVGPRIdx)) {
    unsigned LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass);

    if (LaneVGPR == AMDGPU::NoRegister)
      // We have no VGPRs left for spilling SGPRs.
      return Spill;

    LaneVGPRs[LaneVGPRIdx] = LaneVGPR;

    // Add this register as live-in to all blocks to avoid machine verifier
    // complaining about use of an undefined physical register.
    for (MachineFunction::iterator BI = MF->begin(), BE = MF->end();
         BI != BE; ++BI) {
      BI->addLiveIn(LaneVGPR);
    }
  }

  Spill.VGPR = LaneVGPRs[LaneVGPRIdx];
  return Spill;
}

unsigned SIMachineFunctionInfo::getMaximumWorkGroupSize(
                                              const MachineFunction &MF) const {
  return MaximumWorkGroupSize;
}