//===-- SIMachineFunctionInfo.cpp -------- SI Machine Function Info -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "SIMachineFunctionInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"

#define MAX_LANES 64

using namespace llvm;

static cl::opt<bool> EnableSpillSGPRToVGPR(
  "amdgpu-spill-sgpr-to-vgpr",
  cl::desc("Enable spilling SGPRs to VGPRs"),
  cl::ReallyHidden,
  cl::init(true));

SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
  : AMDGPUMachineFunction(MF),
    TIDReg(AMDGPU::NoRegister),
    ScratchRSrcReg(AMDGPU::NoRegister),
    ScratchWaveOffsetReg(AMDGPU::NoRegister),
    PrivateSegmentBufferUserSGPR(AMDGPU::NoRegister),
    DispatchPtrUserSGPR(AMDGPU::NoRegister),
    QueuePtrUserSGPR(AMDGPU::NoRegister),
    KernargSegmentPtrUserSGPR(AMDGPU::NoRegister),
    DispatchIDUserSGPR(AMDGPU::NoRegister),
    FlatScratchInitUserSGPR(AMDGPU::NoRegister),
    PrivateSegmentSizeUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountXUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountYUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountZUserSGPR(AMDGPU::NoRegister),
    WorkGroupIDXSystemSGPR(AMDGPU::NoRegister),
    WorkGroupIDYSystemSGPR(AMDGPU::NoRegister),
    WorkGroupIDZSystemSGPR(AMDGPU::NoRegister),
    WorkGroupInfoSystemSGPR(AMDGPU::NoRegister),
    PrivateSegmentWaveByteOffsetSystemSGPR(AMDGPU::NoRegister),
    PSInputAddr(0),
    ReturnsVoid(true),
    MaximumWorkGroupSize(0),
    DebuggerReservedVGPRCount(0),
    DebuggerWorkGroupIDStackObjectIndices({{0, 0, 0}}),
    DebuggerWorkItemIDStackObjectIndices({{0, 0, 0}}),
    LDSWaveSpillSize(0),
    PSInputEna(0),
    NumUserSGPRs(0),
    NumSystemSGPRs(0),
    HasSpilledSGPRs(false),
    HasSpilledVGPRs(false),
    HasNonSpillStackObjects(false),
    NumSpilledSGPRs(0),
    NumSpilledVGPRs(0),
    PrivateSegmentBuffer(false),
    DispatchPtr(false),
    QueuePtr(false),
    KernargSegmentPtr(false),
    DispatchID(false),
    FlatScratchInit(false),
    GridWorkgroupCountX(false),
    GridWorkgroupCountY(false),
    GridWorkgroupCountZ(false),
    WorkGroupIDX(false),
    WorkGroupIDY(false),
    WorkGroupIDZ(false),
    WorkGroupInfo(false),
    PrivateSegmentWaveByteOffset(false),
    WorkItemIDX(false),
    WorkItemIDY(false),
    WorkItemIDZ(false) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const Function *F = MF.getFunction();

  PSInputAddr = AMDGPU::getInitialPSInputAddr(*F);

  const MachineFrameInfo &FrameInfo = MF.getFrameInfo();

  if (!AMDGPU::isShader(F->getCallingConv())) {
    KernargSegmentPtr = true;
    WorkGroupIDX = true;
    WorkItemIDX = true;
  }

  if (F->hasFnAttribute("amdgpu-work-group-id-y") || ST.debuggerEmitPrologue())
    WorkGroupIDY = true;

  if (F->hasFnAttribute("amdgpu-work-group-id-z") || ST.debuggerEmitPrologue())
    WorkGroupIDZ = true;

  if (F->hasFnAttribute("amdgpu-work-item-id-y") || ST.debuggerEmitPrologue())
    WorkItemIDY = true;

  if (F->hasFnAttribute("amdgpu-work-item-id-z") || ST.debuggerEmitPrologue())
    WorkItemIDZ = true;

  // X, XY, and XYZ are the only supported combinations, so make sure Y is
  // enabled if Z is.
  if (WorkItemIDZ)
    WorkItemIDY = true;
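  // Touching scratch memory at all (stack objects, or register spills if
  // spilling is enabled) requires the per-wave byte offset into the private
  // segment; under amdhsa it additionally requires the scratch buffer
  // resource descriptor.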
  bool MaySpill = ST.isVGPRSpillingEnabled(*F);
  bool HasStackObjects = FrameInfo.hasStackObjects();

  if (HasStackObjects || MaySpill)
    PrivateSegmentWaveByteOffset = true;

  if (ST.isAmdHsaOS()) {
    if (HasStackObjects || MaySpill)
      PrivateSegmentBuffer = true;

    if (F->hasFnAttribute("amdgpu-dispatch-ptr"))
      DispatchPtr = true;

    if (F->hasFnAttribute("amdgpu-queue-ptr"))
      QueuePtr = true;

    if (F->hasFnAttribute("amdgpu-dispatch-id"))
      DispatchID = true;
  }

  // We don't need to worry about accessing spills with flat instructions.
  // TODO: On VI where we must use flat for global, we should be able to omit
  // this if it is never used for generic access.
  if (HasStackObjects && ST.getGeneration() >= SISubtarget::SEA_ISLANDS &&
      ST.isAmdHsaOS())
    FlatScratchInit = true;

  if (AMDGPU::isCompute(F->getCallingConv()))
    MaximumWorkGroupSize = AMDGPU::getMaximumWorkGroupSize(*F);
  else
    MaximumWorkGroupSize = ST.getWavefrontSize();

  if (ST.debuggerReserveRegs())
    DebuggerReservedVGPRCount = 4;
}

unsigned SIMachineFunctionInfo::addPrivateSegmentBuffer(
  const SIRegisterInfo &TRI) {
  PrivateSegmentBufferUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_128RegClass);
  NumUserSGPRs += 4;
  return PrivateSegmentBufferUserSGPR;
}

unsigned SIMachineFunctionInfo::addDispatchPtr(const SIRegisterInfo &TRI) {
  DispatchPtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return DispatchPtrUserSGPR;
}

unsigned SIMachineFunctionInfo::addQueuePtr(const SIRegisterInfo &TRI) {
  QueuePtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return QueuePtrUserSGPR;
}

unsigned SIMachineFunctionInfo::addKernargSegmentPtr(const SIRegisterInfo &TRI) {
  KernargSegmentPtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return KernargSegmentPtrUserSGPR;
}

unsigned SIMachineFunctionInfo::addDispatchID(const SIRegisterInfo &TRI) {
  DispatchIDUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return DispatchIDUserSGPR;
}

unsigned SIMachineFunctionInfo::addFlatScratchInit(const SIRegisterInfo &TRI) {
  FlatScratchInitUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return FlatScratchInitUserSGPR;
}

SIMachineFunctionInfo::SpilledReg SIMachineFunctionInfo::getSpilledReg(
  MachineFunction *MF,
  unsigned FrameIndex,
  unsigned SubIdx) {
  if (!EnableSpillSGPRToVGPR)
    return SpilledReg();

  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  int64_t Offset = FrameInfo.getObjectOffset(FrameIndex);
  Offset += SubIdx * 4;
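  // Each carrier VGPR has MAX_LANES (64) lanes of 4 bytes, so one VGPR covers
  // a 256-byte chunk of the spill area: the quotient selects the VGPR and the
  // remainder selects the lane within it.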
  unsigned LaneVGPRIdx = Offset / (MAX_LANES * 4);
  unsigned Lane = (Offset / 4) % MAX_LANES;

  SpilledReg Spill;
  Spill.Lane = Lane;

  if (!LaneVGPRs.count(LaneVGPRIdx)) {
    unsigned LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass,
                                                *MF);

    if (LaneVGPR == AMDGPU::NoRegister)
      // We have no VGPRs left for spilling SGPRs.
      return Spill;

    LaneVGPRs[LaneVGPRIdx] = LaneVGPR;

    // Add this register as live-in to all blocks to avoid the machine
    // verifier complaining about use of an undefined physical register.
    for (MachineBasicBlock &BB : *MF)
      BB.addLiveIn(LaneVGPR);
  }

  Spill.VGPR = LaneVGPRs[LaneVGPRIdx];
  return Spill;
}

unsigned SIMachineFunctionInfo::getMaximumWorkGroupSize(
  const MachineFunction &MF) const {
  return MaximumWorkGroupSize;
}