//===----------------------- SIFrameLowering.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"

using namespace llvm;


static ArrayRef<MCPhysReg> getAllSGPR128(const GCNSubtarget &ST,
                                         const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
                      ST.getMaxNumSGPRs(MF) / 4);
}

static ArrayRef<MCPhysReg> getAllSGPRs(const GCNSubtarget &ST,
                                       const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(),
                      ST.getMaxNumSGPRs(MF));
}

void SIFrameLowering::emitFlatScratchInit(const GCNSubtarget &ST,
                                          MachineFunction &MF,
                                          MachineBasicBlock &MBB) const {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // We don't need this if we only have spills, since there is no user-facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.

  // The debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  unsigned FlatScratchInitReg
    = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);

  MachineRegisterInfo &MRI = MF.getRegInfo();
  MRI.addLiveIn(FlatScratchInitReg);
  MBB.addLiveIn(FlatScratchInitReg);

  unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
  unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
      .addReg(FlatScrInitHi)
      .addImm(0);

    return;
  }

  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
    .addReg(FlatScrInitHi, RegState::Kill);

  // Add the wave offset in bytes to the private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
    .addReg(FlatScrInitLo)
    .addReg(ScratchWaveOffsetReg);

  // Convert the offset to 256-byte units.
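  // On this path FLAT_SCR_HI holds the scratch base in 256-byte granularity,
  // so a right shift by log2(256) = 8 performs the conversion; e.g. a byte
  // offset of 0x1200 becomes 0x12.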
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
    .addReg(FlatScrInitLo, RegState::Kill)
    .addImm(8);
}

unsigned SIFrameLowering::getReservedPrivateSegmentBufferReg(
  const GCNSubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // We need to insert initialization of the scratch resource descriptor.
  unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
  if (ScratchRsrcReg == AMDGPU::NoRegister ||
      !MRI.isPhysRegUsed(ScratchRsrcReg))
    return AMDGPU::NoRegister;

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift it down to the end of those
  // which were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. We only
  // cannot do this for the resources required for scratch access. For now we
  // skip over user SGPRs and may leave unused holes.

  // We find the resource first because it has an alignment requirement.

  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = getAllSGPR128(ST, MF);
  AllSGPR128s = AllSGPR128s.slice(
      std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

// Shift down registers reserved for the scratch wave offset and stack pointer
// SGPRs.
std::pair<unsigned, unsigned>
SIFrameLowering::getReservedPrivateSegmentWaveByteOffsetReg(
  const GCNSubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // No replacement necessary.
  if (ScratchWaveOffsetReg == AMDGPU::NoRegister ||
      !MRI.isPhysRegUsed(ScratchWaveOffsetReg)) {
    assert(MFI->getStackPtrOffsetReg() == AMDGPU::SP_REG);
    return std::make_pair(AMDGPU::NoRegister, AMDGPU::NoRegister);
  }

  unsigned SPReg = MFI->getStackPtrOffsetReg();
  if (ST.hasSGPRInitBug())
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();

  ArrayRef<MCPhysReg> AllSGPRs = getAllSGPRs(ST, MF);
  if (NumPreloaded > AllSGPRs.size())
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  AllSGPRs = AllSGPRs.slice(NumPreloaded);

  // We need to drop registers from the end of the list that we cannot use
  // for the scratch wave offset.
  // + 2 because s102 and s103 do not exist on VI
  // + 2 for vcc
  // + 2 for xnack_mask
  // + 2 for flat_scratch
  // + 4 for registers reserved for the scratch resource register
  // + 1 for the register reserved for the scratch wave offset.
  //     (By excluding this register from the candidate list, when it is
  //     already being used for the scratch wave offset and no other SGPR is
  //     free, the value simply stays in this register.)
  // + 1 if the stack pointer is used.
  // ----
  //  13 (+1)
  unsigned ReservedRegCount = 13;

  if (AllSGPRs.size() < ReservedRegCount)
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  bool HandledScratchWaveOffsetReg =
    ScratchWaveOffsetReg != TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);

  for (MCPhysReg Reg : AllSGPRs.drop_back(ReservedRegCount)) {
    // Pick the first unallocated SGPR. Be careful not to pick an alias of the
    // scratch descriptor, since we haven't added its uses yet.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      if (!HandledScratchWaveOffsetReg) {
        HandledScratchWaveOffsetReg = true;

        MRI.replaceRegWith(ScratchWaveOffsetReg, Reg);
        MFI->setScratchWaveOffsetReg(Reg);
        ScratchWaveOffsetReg = Reg;
        break;
      }
    }
  }

  return std::make_pair(ScratchWaveOffsetReg, SPReg);
}

void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
                                                MachineBasicBlock &MBB) const {
  // Emit the debugger prologue if the "amdgpu-debugger-emit-prologue"
  // attribute was specified.
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (ST.debuggerEmitPrologue())
    emitDebuggerPrologue(MF, MBB);

  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // If we only have SGPR spills, we won't actually be using scratch memory
  // since these spill to VGPRs.
  //
  // FIXME: We should be cleaning up these unused SGPR spill frame indices
  // somewhere.

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();

  // We need to do the replacement of the private segment buffer and wave
  // offset register even if there are no stack objects. There could be stores
  // to undef or to a constant without an associated object.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.

  if (MFI->hasFlatScratchInit())
    emitFlatScratchInit(ST, MF, MBB);

  unsigned SPReg = MFI->getStackPtrOffsetReg();
  if (SPReg != AMDGPU::SP_REG) {
    assert(MRI.isReserved(SPReg) && "SPReg used but not reserved");

    DebugLoc DL;
    const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
    int64_t StackSize = FrameInfo.getStackSize();

    if (StackSize == 0) {
      BuildMI(MBB, MBB.begin(), DL, TII->get(AMDGPU::COPY), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg());
    } else {
      BuildMI(MBB, MBB.begin(), DL, TII->get(AMDGPU::S_ADD_U32), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg())
        .addImm(StackSize * ST.getWavefrontSize());
    }
  }

  unsigned ScratchRsrcReg
    = getReservedPrivateSegmentBufferReg(ST, TII, TRI, MFI, MF);

  unsigned ScratchWaveOffsetReg;
  std::tie(ScratchWaveOffsetReg, SPReg)
    = getReservedPrivateSegmentWaveByteOffsetReg(ST, TII, TRI, MFI, MF);

  // It's possible to have uses of only ScratchWaveOffsetReg without
  // ScratchRsrcReg if it's only used for the initialization of flat_scratch,
  // but the inverse is not true.
  if (ScratchWaveOffsetReg == AMDGPU::NoRegister) {
    assert(ScratchRsrcReg == AMDGPU::NoRegister);
    return;
  }

  // We need to insert initialization of the scratch resource descriptor.
  unsigned PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
    AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);

  unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister;
  if (ST.isAmdHsaOrMesa(F)) {
    PreloadedPrivateBufferReg = MFI->getPreloadedReg(
      AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
  }

  bool OffsetRegUsed = MRI.isPhysRegUsed(ScratchWaveOffsetReg);
  bool ResourceRegUsed = ScratchRsrcReg != AMDGPU::NoRegister &&
                         MRI.isPhysRegUsed(ScratchRsrcReg);

  // We added live-ins during argument lowering, but since they were not used
  // they were deleted. We're adding the uses now, so add them back.
  if (OffsetRegUsed) {
    assert(PreloadedScratchWaveOffsetReg != AMDGPU::NoRegister &&
           "scratch wave offset input is required");
    MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
    MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
  }

  if (ResourceRegUsed && PreloadedPrivateBufferReg != AMDGPU::NoRegister) {
    assert(ST.isAmdHsaOrMesa(F) || ST.isMesaGfxShader(F));
    MRI.addLiveIn(PreloadedPrivateBufferReg);
    MBB.addLiveIn(PreloadedPrivateBufferReg);
  }

  // Make the selected registers live throughout the function.
  for (MachineBasicBlock &OtherBB : MF) {
    if (&OtherBB == &MBB)
      continue;

    if (OffsetRegUsed)
      OtherBB.addLiveIn(ScratchWaveOffsetReg);

    if (ResourceRegUsed)
      OtherBB.addLiveIn(ScratchRsrcReg);
  }

  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // If we reserved the original input registers, we don't need to copy to the
  // reserved registers.

  bool CopyBuffer = ResourceRegUsed &&
    PreloadedPrivateBufferReg != AMDGPU::NoRegister &&
    ST.isAmdHsaOrMesa(F) &&
    ScratchRsrcReg != PreloadedPrivateBufferReg;

  // This needs to be careful of the copying order to avoid overwriting one of
  // the input registers before it's been copied to its final
  // destination. Usually the offset should be copied first.
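  // If the incoming wave offset happens to live inside the preloaded buffer
  // register (checked via isSubRegisterEq below), writing the offset first
  // would clobber part of the buffer before it is read, so in that case the
  // buffer must be copied first.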
  bool CopyBufferFirst = TRI->isSubRegisterEq(PreloadedPrivateBufferReg,
                                              ScratchWaveOffsetReg);
  if (CopyBuffer && CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (OffsetRegUsed &&
      PreloadedScratchWaveOffsetReg != ScratchWaveOffsetReg) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
      .addReg(PreloadedScratchWaveOffsetReg,
              MRI.isPhysRegUsed(ScratchWaveOffsetReg) ? 0 : RegState::Kill);
  }

  if (CopyBuffer && !CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (ResourceRegUsed)
    emitEntryFunctionScratchSetup(ST, MF, MBB, MFI, I,
                                  PreloadedPrivateBufferReg, ScratchRsrcReg);
}

// Emit scratch setup code for AMDPAL or Mesa, assuming ResourceRegUsed is set.
void SIFrameLowering::emitEntryFunctionScratchSetup(const GCNSubtarget &ST,
    MachineFunction &MF, MachineBasicBlock &MBB, SIMachineFunctionInfo *MFI,
    MachineBasicBlock::iterator I, unsigned PreloadedPrivateBufferReg,
    unsigned ScratchRsrcReg) const {

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const Function &Fn = MF.getFunction();
  DebugLoc DL;

  if (ST.isAmdPalOS()) {
    // The pointer to the GIT is formed from the offset passed in and either
    // the amdgpu-git-ptr-high function attribute or the top part of the PC.
    unsigned RsrcLo = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
    unsigned RsrcHi = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
    unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    if (MFI->getGITPtrHigh() != 0xffffffff) {
      BuildMI(MBB, I, DL, SMovB32, RsrcHi)
        .addImm(MFI->getGITPtrHigh())
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    } else {
      const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64);
      BuildMI(MBB, I, DL, GetPC64, Rsrc01);
    }
    auto GitPtrLo = AMDGPU::SGPR0; // Low GIT address passed in
    if (ST.hasMergedShaders()) {
      switch (MF.getFunction().getCallingConv()) {
      case CallingConv::AMDGPU_HS:
      case CallingConv::AMDGPU_GS:
        // Low GIT address is passed in s8 rather than s0 for an LS+HS or
        // ES+GS merged shader on gfx9+.
        GitPtrLo = AMDGPU::SGPR8;
        break;
      default:
        break;
      }
    }
    MF.getRegInfo().addLiveIn(GitPtrLo);
    MF.front().addLiveIn(GitPtrLo);
    BuildMI(MBB, I, DL, SMovB32, RsrcLo)
      .addReg(GitPtrLo)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    PointerType *PtrTy =
      PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
                       AMDGPUAS::CONSTANT_ADDRESS);
    MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
    const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
    auto MMO = MF.getMachineMemOperand(PtrInfo,
                                       MachineMemOperand::MOLoad |
                                       MachineMemOperand::MOInvariant |
                                       MachineMemOperand::MODereferenceable,
                                       16, 4);
    unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg)
      .addReg(Rsrc01)
      .addImm(Offset) // offset
      .addImm(0) // glc
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine)
      .addMemOperand(MMO);
    return;
  }
  if (ST.isMesaGfxShader(Fn) ||
      (PreloadedPrivateBufferReg == AMDGPU::NoRegister)) {
    assert(!ST.isAmdHsaOrMesa(Fn));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and set up the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();

    if (MFI->hasImplicitBufferPtr()) {
      unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        PointerType *PtrTy =
          PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
                           AMDGPUAS::CONSTANT_ADDRESS);
        MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
        auto MMO = MF.getMachineMemOperand(PtrInfo,
                                           MachineMemOperand::MOLoad |
                                           MachineMemOperand::MOInvariant |
                                           MachineMemOperand::MODereferenceable,
                                           8, 4);
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addImm(0) // offset
          .addImm(0) // glc
          .addMemOperand(MMO)
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      }
    } else {
      unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
        .addExternalSymbol("SCRATCH_RSRC_DWORD0")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
        .addExternalSymbol("SCRATCH_RSRC_DWORD1")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
      .addImm(Rsrc23 & 0xffffffff)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
      .addImm(Rsrc23 >> 32)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  }
}

// Find a scratch register that we can use at the start of the prologue to
// re-align the stack pointer. We avoid using callee-save registers since they
// may appear to be free when this is called from canUseAsPrologue (during
// shrink wrapping), but then no longer be free when this is called from
// emitPrologue.
//
// FIXME: This is a bit conservative, since in the above case we could use one
// of the callee-save registers as a scratch temp to re-align the stack
// pointer, but we would then have to make sure that we were in fact saving at
// least one callee-save register in the prologue, which is additional
// complexity that doesn't seem worth the benefit.
static unsigned findScratchNonCalleeSaveRegister(MachineBasicBlock &MBB) {
  MachineFunction *MF = MBB.getParent();

  const GCNSubtarget &Subtarget = MF->getSubtarget<GCNSubtarget>();
  const SIRegisterInfo &TRI = *Subtarget.getRegisterInfo();
  LivePhysRegs LiveRegs(TRI);
  LiveRegs.addLiveIns(MBB);

  // Mark callee-saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(MF);
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

  MachineRegisterInfo &MRI = MF->getRegInfo();

  for (unsigned Reg : AMDGPU::SReg_32_XM0RegClass) {
    if (LiveRegs.available(MRI, Reg))
      return Reg;
  }

  return AMDGPU::NoRegister;
}

void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction()) {
    emitEntryFunctionPrologue(MF, MBB);
    return;
  }

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  unsigned FramePtrReg = FuncInfo->getFrameOffsetReg();

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL;

  // XXX - Is this the right predicate?

  bool NeedFP = hasFP(MF);
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = NumBytes;
  const bool NeedsRealignment = TRI.needsStackRealignment(MF);

  if (NeedsRealignment) {
    assert(NeedFP);
    const unsigned Alignment = MFI.getMaxAlignment();

    RoundedSize += Alignment;

    unsigned ScratchSPReg = findScratchNonCalleeSaveRegister(MBB);
    assert(ScratchSPReg != AMDGPU::NoRegister);

    // s_add_u32 tmp_reg, s32, NumBytes
    // s_and_b32 fp, tmp_reg, 0b111...0000
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), ScratchSPReg)
      .addReg(StackPtrReg)
      .addImm((Alignment - 1) * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_AND_B32), FramePtrReg)
      .addReg(ScratchSPReg, RegState::Kill)
      .addImm(-Alignment * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
    FuncInfo->setIsStackRealigned(true);
  } else if (NeedFP) {
    // If we need a frame pointer, set it up here. It's whatever the value of
    // the stack pointer is at this point. Any variable-size objects will be
    // allocated after this, so we can still use the frame pointer to reference
    // locals.
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
      .addReg(StackPtrReg)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  if (RoundedSize != 0 && hasSP(MF)) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(RoundedSize * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
  }

  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;
    TII->storeRegToStackSlot(MBB, MBBI, Reg.VGPR, true,
                             Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
                             &TII->getRegisterInfo());
  }
}

void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();

  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;
    TII->loadRegFromStackSlot(MBB, MBBI, Reg.VGPR,
                              Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
                              &TII->getRegisterInfo());
  }

  unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  if (StackPtrReg == AMDGPU::NoRegister)
    return;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  uint32_t NumBytes = MFI.getStackSize();

  DebugLoc DL;

  // FIXME: Clarify the distinction between having no SP set and having one.
  // For callee functions, it's really a question of whether we need SP to be
  // accurate or not.

  if (NumBytes != 0 && hasSP(MF)) {
    uint32_t RoundedSize = FuncInfo->isStackRealigned() ?
      NumBytes + MFI.getMaxAlignment() : NumBytes;

    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_SUB_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(RoundedSize * ST.getWavefrontSize());
  }
}

static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I))
      return false;
  }

  return true;
}

int SIFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            unsigned &FrameReg) const {
  const SIRegisterInfo *RI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();

  FrameReg = RI->getFrameRegister(MF);
  return MF.getFrameInfo().getObjectOffset(FI);
}

void SIFrameLowering::processFunctionBeforeFrameFinalized(
  MachineFunction &MF,
  RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasStackObjects())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  bool AllSGPRSpilledToVGPRs = false;

  if (TRI.spillSGPRToVGPR() && FuncInfo->hasSpilledSGPRs()) {
    AllSGPRSpilledToVGPRs = true;

    // Process all SGPR spills before frame offsets are finalized. Ideally
    // SGPRs are spilled to VGPRs, in which case we can eliminate the stack
    // usage.
    //
    // XXX - This operates under the assumption that only other SGPR spills
    // are users of the frame index. I'm not 100% sure this is correct.
    // The StackColoring pass has a comment saying a future improvement would
    // be to merge allocas with spill slots, but for now, according to
    // MachineFrameInfo, an isSpillSlot object can't alias any other object.
    for (MachineBasicBlock &MBB : MF) {
      MachineBasicBlock::iterator Next;
      for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
        MachineInstr &MI = *I;
        Next = std::next(I);

        if (TII->isSGPRSpill(MI)) {
          int FI = TII->getNamedOperand(MI, AMDGPU::OpName::addr)->getIndex();
          assert(MFI.getStackID(FI) == SIStackID::SGPR_SPILL);
          if (FuncInfo->allocateSGPRSpillToVGPR(MF, FI)) {
            bool Spilled = TRI.eliminateSGPRToVGPRSpillFrameIndex(MI, FI, RS);
            (void)Spilled;
            assert(Spilled && "failed to spill SGPR to VGPR when allocated");
          } else
            AllSGPRSpilledToVGPRs = false;
        }
      }
    }

    FuncInfo->removeSGPRToVGPRFrameIndices(MFI);
  }

  // FIXME: The other checks should be redundant with allStackObjectsAreDead,
  // but currently hasNonSpillStackObjects is set only from source
  // allocas. Stack temps produced from legalization are not counted currently.
  if (FuncInfo->hasNonSpillStackObjects() || FuncInfo->hasSpilledVGPRs() ||
      !AllSGPRSpilledToVGPRs || !allStackObjectsAreDead(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    // We force this to be at offset 0 so no user object ever has 0 as an
    // address, so we may use 0 as an invalid pointer value. This is because
    // LLVM assumes 0 is an invalid pointer in address space 0. Because alloca
    // is required to be address space 0, we are forced to accept this for
    // now. Ideally we could have the stack in another address space with 0 as
    // a valid pointer, and -1 as the null value.
    //
    // This will also waste additional space when user stack objects require
    // more than 4-byte alignment.
    //
    // The main cost here is losing the offset for addressing modes. However
    // this also ensures we shouldn't need a register for the offset when
    // emergency scavenging.
    int ScavengeFI = MFI.CreateFixedObject(
      TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
    RS->addScavengingFrameIndex(ScavengeFI);
  }
}

void SIFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                           BitVector &SavedRegs,
                                           RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // The SP is specially managed and we don't want extra spills of it.
  SavedRegs.reset(MFI->getStackPtrOffsetReg());
}

MachineBasicBlock::iterator SIFrameLowering::eliminateCallFramePseudoInstr(
  MachineFunction &MF,
  MachineBasicBlock &MBB,
  MachineBasicBlock::iterator I) const {
  int64_t Amount = I->getOperand(0).getImm();
  if (Amount == 0)
    return MBB.erase(I);

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  if (!TFI->hasReservedCallFrame(MF)) {
    unsigned Align = getStackAlignment();

    Amount = alignTo(Amount, Align);
    assert(isUInt<32>(Amount) && "exceeded stack address space size");
    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    unsigned SPReg = MFI->getStackPtrOffsetReg();

    unsigned Op = IsDestroy ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
    BuildMI(MBB, I, DL, TII->get(Op), SPReg)
      .addReg(SPReg)
      .addImm(Amount * ST.getWavefrontSize());
  } else if (CalleePopAmount != 0) {
    llvm_unreachable("is this used?");
  }

  return MBB.erase(I);
}

void SIFrameLowering::emitDebuggerPrologue(MachineFunction &MF,
                                           MachineBasicBlock &MBB) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  MachineBasicBlock::iterator I = MBB.begin();
  DebugLoc DL;

  // For each dimension:
  for (unsigned i = 0; i < 3; ++i) {
    // Get the work group ID SGPR, and make it live-in again.
    unsigned WorkGroupIDSGPR = MFI->getWorkGroupIDSGPR(i);
    MF.getRegInfo().addLiveIn(WorkGroupIDSGPR);
    MBB.addLiveIn(WorkGroupIDSGPR);

    // Since SGPRs are spilled into VGPRs, copy the work group ID SGPR to a
    // VGPR in order to spill it to scratch.
    unsigned WorkGroupIDVGPR =
      MF.getRegInfo().createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), WorkGroupIDVGPR)
      .addReg(WorkGroupIDSGPR);

    // Spill the work group ID.
    int WorkGroupIDObjectIdx = MFI->getDebuggerWorkGroupIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkGroupIDVGPR, false,
                             WorkGroupIDObjectIdx, &AMDGPU::VGPR_32RegClass,
                             TRI);

    // Get the work item ID VGPR, and make it live-in again.
    unsigned WorkItemIDVGPR = MFI->getWorkItemIDVGPR(i);
    MF.getRegInfo().addLiveIn(WorkItemIDVGPR);
    MBB.addLiveIn(WorkItemIDVGPR);

    // Spill the work item ID.
    int WorkItemIDObjectIdx = MFI->getDebuggerWorkItemIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkItemIDVGPR, false,
                             WorkItemIDObjectIdx, &AMDGPU::VGPR_32RegClass,
                             TRI);
  }
}

bool SIFrameLowering::hasFP(const MachineFunction &MF) const {
  // All stack operations are relative to the frame offset SGPR.
  // TODO: Still want to eliminate sometimes.
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // XXX - Is this only called after the frame is finalized? If so, we should
  // be able to check the frame size.
  return MFI.hasStackObjects() && !allStackObjectsAreDead(MFI);
}

bool SIFrameLowering::hasSP(const MachineFunction &MF) const {
  const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
  // All stack operations are relative to the frame offset SGPR.
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return MFI.hasCalls() || MFI.hasVarSizedObjects() ||
         TRI->needsStackRealignment(MF);
}