//===----------------------- SIFrameLowering.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//==-----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"

using namespace llvm;


static ArrayRef<MCPhysReg> getAllSGPR128(const GCNSubtarget &ST,
                                         const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
                      ST.getMaxNumSGPRs(MF) / 4);
}

static ArrayRef<MCPhysReg> getAllSGPRs(const GCNSubtarget &ST,
                                       const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(),
                      ST.getMaxNumSGPRs(MF));
}

void SIFrameLowering::emitFlatScratchInit(const GCNSubtarget &ST,
                                          MachineFunction &MF,
                                          MachineBasicBlock &MBB) const {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // We don't need this if we only have spills since there is no user facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  unsigned FlatScratchInitReg
    = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);

  MachineRegisterInfo &MRI = MF.getRegInfo();
  MRI.addLiveIn(FlatScratchInitReg);
  MBB.addLiveIn(FlatScratchInitReg);

  unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
  unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
      .addReg(FlatScrInitHi)
      .addImm(0);

    return;
  }

  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
    .addReg(FlatScrInitHi, RegState::Kill);

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
    .addReg(FlatScrInitLo)
    .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
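  // The right shift by 8 divides the byte offset just computed (private base
  // offset plus wave offset) by 256, so FLAT_SCR_HI is written in the 256-byte
  // units described above.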
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
    .addReg(FlatScrInitLo, RegState::Kill)
    .addImm(8);
}

unsigned SIFrameLowering::getReservedPrivateSegmentBufferReg(
  const GCNSubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // We need to insert initialization of the scratch resource descriptor.
  unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
  if (ScratchRsrcReg == AMDGPU::NoRegister ||
      !MRI.isPhysRegUsed(ScratchRsrcReg))
    return AMDGPU::NoRegister;

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift it down to the end of those
  // which were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. We only
  // cannot do this for the resources required for scratch access. For now we
  // skip over user SGPRs and may leave unused holes.

  // We find the resource first because it has an alignment requirement.

  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = getAllSGPR128(ST, MF);
  AllSGPR128s = AllSGPR128s.slice(
    std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

// Shift down registers reserved for the scratch wave offset and stack pointer
// SGPRs.
std::pair<unsigned, unsigned>
SIFrameLowering::getReservedPrivateSegmentWaveByteOffsetReg(
  const GCNSubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // No replacement necessary.
  if (ScratchWaveOffsetReg == AMDGPU::NoRegister ||
      !MRI.isPhysRegUsed(ScratchWaveOffsetReg)) {
    assert(MFI->getStackPtrOffsetReg() == AMDGPU::SP_REG);
    return std::make_pair(AMDGPU::NoRegister, AMDGPU::NoRegister);
  }

  unsigned SPReg = MFI->getStackPtrOffsetReg();
  if (ST.hasSGPRInitBug())
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();

  ArrayRef<MCPhysReg> AllSGPRs = getAllSGPRs(ST, MF);
  if (NumPreloaded > AllSGPRs.size())
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  AllSGPRs = AllSGPRs.slice(NumPreloaded);

  // We need to drop registers from the end of the list that we cannot use
  // for the scratch wave offset.
  // + 2 because s102 and s103 do not exist on VI.
  // + 2 for vcc
  // + 2 for xnack_mask
  // + 2 for flat_scratch
  // + 4 for registers reserved for scratch resource register
  // + 1 for register reserved for scratch wave offset.
  //     (By excluding this register from the list to consider, it means that
  //     when this register is being used for the scratch wave offset and
  //     there are no other free SGPRs, then the value will stay in this
  //     register.)
  // + 1 if stack pointer is used.
  // ----
  //  13 (+1)
  unsigned ReservedRegCount = 13;

  if (AllSGPRs.size() < ReservedRegCount)
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  bool HandledScratchWaveOffsetReg =
    ScratchWaveOffsetReg != TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);

  for (MCPhysReg Reg : AllSGPRs.drop_back(ReservedRegCount)) {
    // Pick the first unallocated SGPR. Be careful not to pick an alias of the
    // scratch descriptor, since we haven't added its uses yet.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      if (!HandledScratchWaveOffsetReg) {
        HandledScratchWaveOffsetReg = true;

        MRI.replaceRegWith(ScratchWaveOffsetReg, Reg);
        MFI->setScratchWaveOffsetReg(Reg);
        ScratchWaveOffsetReg = Reg;
        break;
      }
    }
  }

  return std::make_pair(ScratchWaveOffsetReg, SPReg);
}

void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
                                                MachineBasicBlock &MBB) const {
  // Emit debugger prologue if "amdgpu-debugger-emit-prologue" attribute was
  // specified.
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (ST.debuggerEmitPrologue())
    emitDebuggerPrologue(MF, MBB);

  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // If we only have SGPR spills, we won't actually be using scratch memory
  // since these spill to VGPRs.
  //
  // FIXME: We should be cleaning up these unused SGPR spill frame indices
  // somewhere.

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();

  // We need to do the replacement of the private segment buffer and wave
  // offset register even if there are no stack objects. There could be stores
  // to undef or a constant without an associated object.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prologue should
  // be emitted after frame indices are eliminated.
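
  // From here on: initialize flat scratch if it is needed, materialize the
  // stack pointer from the wave offset, then pick and set up the scratch
  // resource descriptor and scratch wave offset registers.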

  if (MFI->hasFlatScratchInit())
    emitFlatScratchInit(ST, MF, MBB);

  unsigned SPReg = MFI->getStackPtrOffsetReg();
  if (SPReg != AMDGPU::SP_REG) {
    assert(MRI.isReserved(SPReg) && "SPReg used but not reserved");

    DebugLoc DL;
    const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
    int64_t StackSize = FrameInfo.getStackSize();

    if (StackSize == 0) {
      BuildMI(MBB, MBB.begin(), DL, TII->get(AMDGPU::COPY), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg());
    } else {
      BuildMI(MBB, MBB.begin(), DL, TII->get(AMDGPU::S_ADD_U32), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg())
        .addImm(StackSize * ST.getWavefrontSize());
    }
  }

  unsigned ScratchRsrcReg
    = getReservedPrivateSegmentBufferReg(ST, TII, TRI, MFI, MF);

  unsigned ScratchWaveOffsetReg;
  std::tie(ScratchWaveOffsetReg, SPReg)
    = getReservedPrivateSegmentWaveByteOffsetReg(ST, TII, TRI, MFI, MF);

  // It's possible to have uses of only ScratchWaveOffsetReg without
  // ScratchRsrcReg if it's only used for the initialization of flat_scratch,
  // but the inverse is not true.
  if (ScratchWaveOffsetReg == AMDGPU::NoRegister) {
    assert(ScratchRsrcReg == AMDGPU::NoRegister);
    return;
  }

  // We need to insert initialization of the scratch resource descriptor.
  unsigned PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
    AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);

  unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister;
  if (ST.isAmdHsaOrMesa(F)) {
    PreloadedPrivateBufferReg = MFI->getPreloadedReg(
      AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
  }

  bool OffsetRegUsed = MRI.isPhysRegUsed(ScratchWaveOffsetReg);
  bool ResourceRegUsed = ScratchRsrcReg != AMDGPU::NoRegister &&
                         MRI.isPhysRegUsed(ScratchRsrcReg);

  // We added live-ins during argument lowering, but since they were not used
  // they were deleted. We're adding the uses now, so add them back.
  if (OffsetRegUsed) {
    assert(PreloadedScratchWaveOffsetReg != AMDGPU::NoRegister &&
           "scratch wave offset input is required");
    MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
    MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
  }

  if (ResourceRegUsed && PreloadedPrivateBufferReg != AMDGPU::NoRegister) {
    assert(ST.isAmdHsaOrMesa(F) || ST.isMesaGfxShader(F));
    MRI.addLiveIn(PreloadedPrivateBufferReg);
    MBB.addLiveIn(PreloadedPrivateBufferReg);
  }

  // Make the selected register live throughout the function.
  for (MachineBasicBlock &OtherBB : MF) {
    if (&OtherBB == &MBB)
      continue;

    if (OffsetRegUsed)
      OtherBB.addLiveIn(ScratchWaveOffsetReg);

    if (ResourceRegUsed)
      OtherBB.addLiveIn(ScratchRsrcReg);
  }

  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // If we reserved the original input registers, we don't need to copy to the
  // reserved registers.

  bool CopyBuffer = ResourceRegUsed &&
    PreloadedPrivateBufferReg != AMDGPU::NoRegister &&
    ST.isAmdHsaOrMesa(F) &&
    ScratchRsrcReg != PreloadedPrivateBufferReg;

  // This needs to be careful of the copying order to avoid overwriting one of
  // the input registers before it's been copied to its final
  // destination. Usually the offset should be copied first.
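  // The exception, handled by CopyBufferFirst below, is when the preloaded
  // buffer register overlaps the chosen wave offset register; then the buffer
  // is copied first so the offset copy does not clobber it.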
  bool CopyBufferFirst = TRI->isSubRegisterEq(PreloadedPrivateBufferReg,
                                              ScratchWaveOffsetReg);
  if (CopyBuffer && CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (OffsetRegUsed &&
      PreloadedScratchWaveOffsetReg != ScratchWaveOffsetReg) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
      .addReg(PreloadedScratchWaveOffsetReg,
              MRI.isPhysRegUsed(ScratchWaveOffsetReg) ? 0 : RegState::Kill);
  }

  if (CopyBuffer && !CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (ResourceRegUsed)
    emitEntryFunctionScratchSetup(ST, MF, MBB, MFI, I,
                                  PreloadedPrivateBufferReg, ScratchRsrcReg);
}

// Emit scratch setup code for AMDPAL or Mesa, assuming ResourceRegUsed is set.
void SIFrameLowering::emitEntryFunctionScratchSetup(const GCNSubtarget &ST,
      MachineFunction &MF, MachineBasicBlock &MBB, SIMachineFunctionInfo *MFI,
      MachineBasicBlock::iterator I, unsigned PreloadedPrivateBufferReg,
      unsigned ScratchRsrcReg) const {

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const Function &Fn = MF.getFunction();
  DebugLoc DL;

  if (ST.isAmdPalOS()) {
    // The pointer to the GIT is formed from the offset passed in and either
    // the amdgpu-git-ptr-high function attribute or the top part of the PC.
    unsigned RsrcLo = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
    unsigned RsrcHi = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
    unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    if (MFI->getGITPtrHigh() != 0xffffffff) {
      BuildMI(MBB, I, DL, SMovB32, RsrcHi)
        .addImm(MFI->getGITPtrHigh())
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    } else {
      const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64);
      BuildMI(MBB, I, DL, GetPC64, Rsrc01);
    }
    auto GitPtrLo = AMDGPU::SGPR0; // Low GIT address passed in
    if (ST.hasMergedShaders()) {
      switch (MF.getFunction().getCallingConv()) {
        case CallingConv::AMDGPU_HS:
        case CallingConv::AMDGPU_GS:
          // Low GIT address is passed in s8 rather than s0 for an LS+HS or
          // ES+GS merged shader on gfx9+.
          GitPtrLo = AMDGPU::SGPR8;
          break;
        default:
          break;
      }
    }
    MF.getRegInfo().addLiveIn(GitPtrLo);
    MF.front().addLiveIn(GitPtrLo);
    BuildMI(MBB, I, DL, SMovB32, RsrcLo)
      .addReg(GitPtrLo)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    PointerType *PtrTy =
      PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
                       AMDGPUAS::CONSTANT_ADDRESS);
    MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
    const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
    auto MMO = MF.getMachineMemOperand(PtrInfo,
                                       MachineMemOperand::MOLoad |
                                       MachineMemOperand::MOInvariant |
                                       MachineMemOperand::MODereferenceable,
                                       16, 4);
    unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
    unsigned EncodedOffset = AMDGPU::getSMRDEncodedOffset(Subtarget, Offset);
    BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg)
      .addReg(Rsrc01)
      .addImm(EncodedOffset) // offset
      .addImm(0) // glc
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine)
      .addMemOperand(MMO);
    return;
  }
  if (ST.isMesaGfxShader(Fn)
      || (PreloadedPrivateBufferReg == AMDGPU::NoRegister)) {
    assert(!ST.isAmdHsaOrMesa(Fn));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and setup the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();

    if (MFI->hasImplicitBufferPtr()) {
      unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        PointerType *PtrTy =
          PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
                           AMDGPUAS::CONSTANT_ADDRESS);
        MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
        auto MMO = MF.getMachineMemOperand(PtrInfo,
                                           MachineMemOperand::MOLoad |
                                           MachineMemOperand::MOInvariant |
                                           MachineMemOperand::MODereferenceable,
                                           8, 4);
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addImm(0) // offset
          .addImm(0) // glc
          .addMemOperand(MMO)
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      }
    } else {
      unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
        .addExternalSymbol("SCRATCH_RSRC_DWORD0")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
        .addExternalSymbol("SCRATCH_RSRC_DWORD1")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
      .addImm(Rsrc23 & 0xffffffff)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
      .addImm(Rsrc23 >> 32)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  }
}

// Find a scratch register that we can use at the start of the prologue to
// re-align the stack pointer. We avoid using callee-save registers since they
// may appear to be free when this is called from canUseAsPrologue (during
// shrink wrapping), but then no longer be free when this is called from
// emitPrologue.
//
// FIXME: This is a bit conservative, since in the above case we could use one
// of the callee-save registers as a scratch temp to re-align the stack
// pointer, but we would then have to make sure that we were in fact saving at
// least one callee-save register in the prologue, which is additional
// complexity that doesn't seem worth the benefit.
static unsigned findScratchNonCalleeSaveRegister(MachineBasicBlock &MBB) {
  MachineFunction *MF = MBB.getParent();

  const GCNSubtarget &Subtarget = MF->getSubtarget<GCNSubtarget>();
  const SIRegisterInfo &TRI = *Subtarget.getRegisterInfo();
  LivePhysRegs LiveRegs(TRI);
  LiveRegs.addLiveIns(MBB);

  // Mark callee saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(MF);
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

  MachineRegisterInfo &MRI = MF->getRegInfo();

  for (unsigned Reg : AMDGPU::SReg_32_XM0RegClass) {
    if (LiveRegs.available(MRI, Reg))
      return Reg;
  }

  return AMDGPU::NoRegister;
}

void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction()) {
    emitEntryFunctionPrologue(MF, MBB);
    return;
  }

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  unsigned FramePtrReg = FuncInfo->getFrameOffsetReg();

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL;

  // XXX - Is this the right predicate?

  bool NeedFP = hasFP(MF);
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = NumBytes;
  const bool NeedsRealignment = TRI.needsStackRealignment(MF);

  if (NeedsRealignment) {
    assert(NeedFP);
    const unsigned Alignment = MFI.getMaxAlignment();

    RoundedSize += Alignment;

    unsigned ScratchSPReg = findScratchNonCalleeSaveRegister(MBB);
    assert(ScratchSPReg != AMDGPU::NoRegister);

    // s_add_u32 tmp_reg, s32, NumBytes
    // s_and_b32 s32, tmp_reg, 0b111...0000
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), ScratchSPReg)
      .addReg(StackPtrReg)
      .addImm((Alignment - 1) * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_AND_B32), FramePtrReg)
      .addReg(ScratchSPReg, RegState::Kill)
      .addImm(-Alignment * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
    FuncInfo->setIsStackRealigned(true);
  } else if (NeedFP) {
    // If we need a base pointer, set it up here. It's whatever the value of
    // the stack pointer is at this point. Any variable size objects will be
    // allocated after this, so we can still use the base pointer to reference
    // locals.
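    // (In the realigned case above, the frame pointer was instead set to the
    // aligned address produced by the s_and_b32.)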
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
      .addReg(StackPtrReg)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  if (RoundedSize != 0 && hasSP(MF)) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(RoundedSize * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
  }

  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;
    TII->storeRegToStackSlot(MBB, MBBI, Reg.VGPR, true,
                             Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
                             &TII->getRegisterInfo());
  }
}

void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();

  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;
    TII->loadRegFromStackSlot(MBB, MBBI, Reg.VGPR,
                              Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
                              &TII->getRegisterInfo());
  }

  unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  if (StackPtrReg == AMDGPU::NoRegister)
    return;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  uint32_t NumBytes = MFI.getStackSize();

  DebugLoc DL;

  // FIXME: Clarify distinction between no set SP and SP. For callee functions,
  // it's really whether we need SP to be accurate or not.

  if (NumBytes != 0 && hasSP(MF)) {
    uint32_t RoundedSize = FuncInfo->isStackRealigned() ?
      NumBytes + MFI.getMaxAlignment() : NumBytes;

    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_SUB_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(RoundedSize * ST.getWavefrontSize());
  }
}

static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I))
      return false;
  }

  return true;
}

int SIFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            unsigned &FrameReg) const {
  const SIRegisterInfo *RI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();

  FrameReg = RI->getFrameRegister(MF);
  return MF.getFrameInfo().getObjectOffset(FI);
}

void SIFrameLowering::processFunctionBeforeFrameFinalized(
  MachineFunction &MF,
  RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasStackObjects())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  bool AllSGPRSpilledToVGPRs = false;

  if (TRI.spillSGPRToVGPR() && FuncInfo->hasSpilledSGPRs()) {
    AllSGPRSpilledToVGPRs = true;

    // Process all SGPR spills before frame offsets are finalized. Ideally
    // SGPRs are spilled to VGPRs, in which case we can eliminate the stack
    // usage.
    //
    // XXX - This operates under the assumption that only other SGPR spills are
    // users of the frame index. I'm not 100% sure this is correct.
    // The StackColoring pass has a comment saying a future improvement would
    // be to merge allocas with spill slots, but for now, according to
    // MachineFrameInfo, a spill slot can't alias any other object.
    for (MachineBasicBlock &MBB : MF) {
      MachineBasicBlock::iterator Next;
      for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
        MachineInstr &MI = *I;
        Next = std::next(I);

        if (TII->isSGPRSpill(MI)) {
          int FI = TII->getNamedOperand(MI, AMDGPU::OpName::addr)->getIndex();
          assert(MFI.getStackID(FI) == SIStackID::SGPR_SPILL);
          if (FuncInfo->allocateSGPRSpillToVGPR(MF, FI)) {
            bool Spilled = TRI.eliminateSGPRToVGPRSpillFrameIndex(MI, FI, RS);
            (void)Spilled;
            assert(Spilled && "failed to spill SGPR to VGPR when allocated");
          } else
            AllSGPRSpilledToVGPRs = false;
        }
      }
    }

    FuncInfo->removeSGPRToVGPRFrameIndices(MFI);
  }

  // FIXME: The other checks should be redundant with allStackObjectsAreDead,
  // but currently hasNonSpillStackObjects is set only from source
  // allocas. Stack temps produced from legalization are not counted currently.
  if (FuncInfo->hasNonSpillStackObjects() || FuncInfo->hasSpilledVGPRs() ||
      !AllSGPRSpilledToVGPRs || !allStackObjectsAreDead(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    // We force this to be at offset 0 so no user object ever has 0 as an
    // address, so we may use 0 as an invalid pointer value. This is because
    // LLVM assumes 0 is an invalid pointer in address space 0. Because alloca
    // is required to be address space 0, we are forced to accept this for
    // now. Ideally we could have the stack in another address space with 0 as
    // a valid pointer, and -1 as the null value.
    //
    // This will also waste additional space when user stack objects require
    // > 4 byte alignment.
    //
    // The main cost here is losing the offset for addressing modes. However
    // this also ensures we shouldn't need a register for the offset when
    // emergency scavenging.
    int ScavengeFI = MFI.CreateFixedObject(
      TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
    RS->addScavengingFrameIndex(ScavengeFI);
  }
}

void SIFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                           BitVector &SavedRegs,
                                           RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // The SP is specifically managed and we don't want extra spills of it.
  SavedRegs.reset(MFI->getStackPtrOffsetReg());
}

MachineBasicBlock::iterator SIFrameLowering::eliminateCallFramePseudoInstr(
  MachineFunction &MF,
  MachineBasicBlock &MBB,
  MachineBasicBlock::iterator I) const {
  int64_t Amount = I->getOperand(0).getImm();
  if (Amount == 0)
    return MBB.erase(I);

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  if (!TFI->hasReservedCallFrame(MF)) {
    unsigned Align = getStackAlignment();

    Amount = alignTo(Amount, Align);
    assert(isUInt<32>(Amount) && "exceeded stack address space size");
    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    unsigned SPReg = MFI->getStackPtrOffsetReg();

    unsigned Op = IsDestroy ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
    BuildMI(MBB, I, DL, TII->get(Op), SPReg)
      .addReg(SPReg)
      .addImm(Amount * ST.getWavefrontSize());
  } else if (CalleePopAmount != 0) {
    llvm_unreachable("is this used?");
  }

  return MBB.erase(I);
}

void SIFrameLowering::emitDebuggerPrologue(MachineFunction &MF,
                                           MachineBasicBlock &MBB) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  MachineBasicBlock::iterator I = MBB.begin();
  DebugLoc DL;

  // For each dimension:
  for (unsigned i = 0; i < 3; ++i) {
    // Get work group ID SGPR, and make it live-in again.
    unsigned WorkGroupIDSGPR = MFI->getWorkGroupIDSGPR(i);
    MF.getRegInfo().addLiveIn(WorkGroupIDSGPR);
    MBB.addLiveIn(WorkGroupIDSGPR);

    // Since SGPRs are spilled into VGPRs, copy work group ID SGPR to VGPR in
    // order to spill it to scratch.
    unsigned WorkGroupIDVGPR =
      MF.getRegInfo().createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), WorkGroupIDVGPR)
      .addReg(WorkGroupIDSGPR);

    // Spill work group ID.
    int WorkGroupIDObjectIdx = MFI->getDebuggerWorkGroupIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkGroupIDVGPR, false,
                             WorkGroupIDObjectIdx, &AMDGPU::VGPR_32RegClass,
                             TRI);

    // Get work item ID VGPR, and make it live-in again.
    unsigned WorkItemIDVGPR = MFI->getWorkItemIDVGPR(i);
    MF.getRegInfo().addLiveIn(WorkItemIDVGPR);
    MBB.addLiveIn(WorkItemIDVGPR);

    // Spill work item ID.
    int WorkItemIDObjectIdx = MFI->getDebuggerWorkItemIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkItemIDVGPR, false,
                             WorkItemIDObjectIdx, &AMDGPU::VGPR_32RegClass,
                             TRI);
  }
}

bool SIFrameLowering::hasFP(const MachineFunction &MF) const {
  // All stack operations are relative to the frame offset SGPR.
  // TODO: Still want to eliminate sometimes.
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // XXX - Is this only called after frame is finalized? Should be able to
  // check frame size.
  return MFI.hasStackObjects() && !allStackObjectsAreDead(MFI);
}

bool SIFrameLowering::hasSP(const MachineFunction &MF) const {
  const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
  // All stack operations are relative to the frame offset SGPR.
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return MFI.hasCalls() || MFI.hasVarSizedObjects() ||
         TRI->needsStackRealignment(MF);
}