//===----------------------- SIFrameLowering.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//==-----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"

using namespace llvm;


static ArrayRef<MCPhysReg> getAllSGPR128(const GCNSubtarget &ST,
                                         const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
                      ST.getMaxNumSGPRs(MF) / 4);
}

static ArrayRef<MCPhysReg> getAllSGPRs(const GCNSubtarget &ST,
                                       const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(),
                      ST.getMaxNumSGPRs(MF));
}

void SIFrameLowering::emitFlatScratchInit(const GCNSubtarget &ST,
                                          MachineFunction &MF,
                                          MachineBasicBlock &MBB) const {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo* TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // We don't need this if we only have spills since there is no user facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  unsigned FlatScratchInitReg
    = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);

  MachineRegisterInfo &MRI = MF.getRegInfo();
  MRI.addLiveIn(FlatScratchInitReg);
  MBB.addLiveIn(FlatScratchInitReg);

  unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
  unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
        .addReg(FlatScrInitLo)
        .addReg(ScratchWaveOffsetReg);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), FlatScrInitHi)
        .addReg(FlatScrInitHi)
        .addImm(0);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32)).
        addReg(FlatScrInitLo).
        addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_LO |
                       (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32)).
        addReg(FlatScrInitHi).
        addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_HI |
                       (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
      return;
    }
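
    // Targets below GFX10 expose FLAT_SCRATCH as a directly writable SGPR
    // pair, so the 64-bit add can define FLAT_SCR_LO/FLAT_SCR_HI itself
    // instead of going through S_SETREG.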
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
      .addReg(FlatScrInitHi)
      .addImm(0);

    return;
  }

  assert(ST.getGeneration() < AMDGPUSubtarget::GFX10);

  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
    .addReg(FlatScrInitHi, RegState::Kill);

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
    .addReg(FlatScrInitLo)
    .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
    .addReg(FlatScrInitLo, RegState::Kill)
    .addImm(8);
}

unsigned SIFrameLowering::getReservedPrivateSegmentBufferReg(
  const GCNSubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // We need to insert initialization of the scratch resource descriptor.
  unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
  if (ScratchRsrcReg == AMDGPU::NoRegister ||
      !MRI.isPhysRegUsed(ScratchRsrcReg))
    return AMDGPU::NoRegister;

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift it down to the end of those
  // which were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. We cannot,
  // however, do this for the resources required for scratch access. For now
  // we skip over user SGPRs and may leave unused holes.

  // We find the resource first because it has an alignment requirement.

  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = getAllSGPR128(ST, MF);
  AllSGPR128s = AllSGPR128s.slice(
    std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

// Shift down registers reserved for the scratch wave offset.
unsigned SIFrameLowering::getReservedPrivateSegmentWaveByteOffsetReg(
    const GCNSubtarget &ST, const SIInstrInfo *TII, const SIRegisterInfo *TRI,
    SIMachineFunctionInfo *MFI, MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  assert(MFI->isEntryFunction());

  // No replacement necessary.
  if (ScratchWaveOffsetReg == AMDGPU::NoRegister ||
      (!hasFP(MF) && !MRI.isPhysRegUsed(ScratchWaveOffsetReg))) {
    return AMDGPU::NoRegister;
  }

  if (ST.hasSGPRInitBug())
    return ScratchWaveOffsetReg;

  unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();

  ArrayRef<MCPhysReg> AllSGPRs = getAllSGPRs(ST, MF);
  if (NumPreloaded > AllSGPRs.size())
    return ScratchWaveOffsetReg;

  AllSGPRs = AllSGPRs.slice(NumPreloaded);

  // We need to drop registers from the end of the list that we cannot use
  // for the scratch wave offset.
  // + 2 s102 and s103 do not exist on VI.
  // + 2 for vcc
  // + 2 for xnack_mask
  // + 2 for flat_scratch
  // + 4 for registers reserved for scratch resource register
  // + 1 for register reserved for scratch wave offset. (By excluding this
  //     register from the list to consider, it means that when this
  //     register is being used for the scratch wave offset and there
  //     are no other free SGPRs, then the value will stay in this register.)
  // + 1 if stack pointer is used.
  // ----
  //  13 (+1)
  unsigned ReservedRegCount = 13;

  if (AllSGPRs.size() < ReservedRegCount)
    return ScratchWaveOffsetReg;

  bool HandledScratchWaveOffsetReg =
    ScratchWaveOffsetReg != TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);

  for (MCPhysReg Reg : AllSGPRs.drop_back(ReservedRegCount)) {
    // Pick the first unallocated SGPR. Be careful not to pick an alias of the
    // scratch descriptor, since we haven't added its uses yet.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      if (!HandledScratchWaveOffsetReg) {
        HandledScratchWaveOffsetReg = true;

        MRI.replaceRegWith(ScratchWaveOffsetReg, Reg);
        if (MFI->getScratchWaveOffsetReg() == MFI->getStackPtrOffsetReg()) {
          assert(!hasFP(MF));
          MFI->setStackPtrOffsetReg(Reg);
        }

        MFI->setScratchWaveOffsetReg(Reg);
        MFI->setFrameOffsetReg(Reg);
        ScratchWaveOffsetReg = Reg;
        break;
      }
    }
  }

  return ScratchWaveOffsetReg;
}
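
// Set up the special registers for a kernel entry point: the flat scratch
// base, the scratch resource descriptor, the scratch wave offset, and the
// stack pointer. Non-entry functions are handled in emitPrologue below.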
void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
                                                MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // If we only have SGPR spills, we won't actually be using scratch memory
  // since these spill to VGPRs.
  //
  // FIXME: We should be cleaning up these unused SGPR spill frame indices
  // somewhere.

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();

  // We need to do the replacement of the private segment buffer and wave
  // offset register even if there are no stack objects. There could be stores
  // to undef or a constant without an associated object.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.

  if (MFI->hasFlatScratchInit())
    emitFlatScratchInit(ST, MF, MBB);

  unsigned ScratchRsrcReg
    = getReservedPrivateSegmentBufferReg(ST, TII, TRI, MFI, MF);

  unsigned ScratchWaveOffsetReg =
    getReservedPrivateSegmentWaveByteOffsetReg(ST, TII, TRI, MFI, MF);

  // We need to insert initialization of the scratch resource descriptor.
  unsigned PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
    AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);

  unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister;
  if (ST.isAmdHsaOrMesa(F)) {
    PreloadedPrivateBufferReg = MFI->getPreloadedReg(
      AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
  }

  bool OffsetRegUsed = ScratchWaveOffsetReg != AMDGPU::NoRegister &&
                       MRI.isPhysRegUsed(ScratchWaveOffsetReg);
  bool ResourceRegUsed = ScratchRsrcReg != AMDGPU::NoRegister &&
                         MRI.isPhysRegUsed(ScratchRsrcReg);

  // FIXME: Hack to not crash in situations which emitted an error.
  if (PreloadedScratchWaveOffsetReg == AMDGPU::NoRegister)
    return;

  // We added live-ins during argument lowering, but since they were not used
  // they were deleted. We're adding the uses now, so add them back.
  MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
  MBB.addLiveIn(PreloadedScratchWaveOffsetReg);

  if (ResourceRegUsed && PreloadedPrivateBufferReg != AMDGPU::NoRegister) {
    assert(ST.isAmdHsaOrMesa(F) || ST.isMesaGfxShader(F));
    MRI.addLiveIn(PreloadedPrivateBufferReg);
    MBB.addLiveIn(PreloadedPrivateBufferReg);
  }

  // Make the register selected live throughout the function.
  for (MachineBasicBlock &OtherBB : MF) {
    if (&OtherBB == &MBB)
      continue;

    if (OffsetRegUsed)
      OtherBB.addLiveIn(ScratchWaveOffsetReg);

    if (ResourceRegUsed)
      OtherBB.addLiveIn(ScratchRsrcReg);
  }

  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // If we reserved the original input registers, we don't need to copy to the
  // reserved registers.

  bool CopyBuffer = ResourceRegUsed &&
    PreloadedPrivateBufferReg != AMDGPU::NoRegister &&
    ST.isAmdHsaOrMesa(F) &&
    ScratchRsrcReg != PreloadedPrivateBufferReg;

  // This needs to be careful of the copying order to avoid overwriting one of
  // the input registers before it's been copied to its final
  // destination. Usually the offset should be copied first.
  bool CopyBufferFirst = TRI->isSubRegisterEq(PreloadedPrivateBufferReg,
                                              ScratchWaveOffsetReg);
  if (CopyBuffer && CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  unsigned SPReg = MFI->getStackPtrOffsetReg();
  assert(SPReg != AMDGPU::SP_REG);

  // FIXME: Remove the isPhysRegUsed checks
  const bool HasFP = hasFP(MF);

  if (HasFP || OffsetRegUsed) {
    assert(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
      .addReg(PreloadedScratchWaveOffsetReg, HasFP ? RegState::Kill : 0);
  }

  if (CopyBuffer && !CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (ResourceRegUsed) {
    emitEntryFunctionScratchSetup(ST, MF, MBB, MFI, I,
                                  PreloadedPrivateBufferReg, ScratchRsrcReg);
  }

  if (HasFP) {
    DebugLoc DL;
    const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
    int64_t StackSize = FrameInfo.getStackSize();

    // On kernel entry, the private scratch wave offset is the SP value.
    if (StackSize == 0) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg());
    } else {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg())
        .addImm(StackSize * ST.getWavefrontSize());
    }
  }
}

// Emit scratch setup code for AMDPAL or Mesa, assuming ResourceRegUsed is set.
void SIFrameLowering::emitEntryFunctionScratchSetup(const GCNSubtarget &ST,
    MachineFunction &MF, MachineBasicBlock &MBB, SIMachineFunctionInfo *MFI,
    MachineBasicBlock::iterator I, unsigned PreloadedPrivateBufferReg,
    unsigned ScratchRsrcReg) const {

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const Function &Fn = MF.getFunction();
  DebugLoc DL;

  if (ST.isAmdPalOS()) {
    // The pointer to the GIT is formed from the offset passed in and either
    // the amdgpu-git-ptr-high function attribute or the top part of the PC
    unsigned RsrcLo = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
    unsigned RsrcHi = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
    unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    if (MFI->getGITPtrHigh() != 0xffffffff) {
      BuildMI(MBB, I, DL, SMovB32, RsrcHi)
        .addImm(MFI->getGITPtrHigh())
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    } else {
      const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64);
      BuildMI(MBB, I, DL, GetPC64, Rsrc01);
    }
    auto GitPtrLo = AMDGPU::SGPR0; // Low GIT address passed in
    if (ST.hasMergedShaders()) {
      switch (MF.getFunction().getCallingConv()) {
      case CallingConv::AMDGPU_HS:
      case CallingConv::AMDGPU_GS:
        // Low GIT address is passed in s8 rather than s0 for an LS+HS or
        // ES+GS merged shader on gfx9+.
        GitPtrLo = AMDGPU::SGPR8;
        break;
      default:
        break;
      }
    }
    MF.getRegInfo().addLiveIn(GitPtrLo);
    MBB.addLiveIn(GitPtrLo);
    BuildMI(MBB, I, DL, SMovB32, RsrcLo)
      .addReg(GitPtrLo)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    PointerType *PtrTy =
      PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
                       AMDGPUAS::CONSTANT_ADDRESS);
    MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
    const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
    auto MMO = MF.getMachineMemOperand(PtrInfo,
                                       MachineMemOperand::MOLoad |
                                       MachineMemOperand::MOInvariant |
                                       MachineMemOperand::MODereferenceable,
                                       16, 4);
    unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
    unsigned EncodedOffset = AMDGPU::getSMRDEncodedOffset(Subtarget, Offset);
    BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg)
      .addReg(Rsrc01)
      .addImm(EncodedOffset) // offset
      .addImm(0) // glc
      .addImm(0) // dlc
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine)
      .addMemOperand(MMO);
    return;
  }
  if (ST.isMesaGfxShader(Fn)
      || (PreloadedPrivateBufferReg == AMDGPU::NoRegister)) {
    assert(!ST.isAmdHsaOrMesa(Fn));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and setup the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();

    if (MFI->hasImplicitBufferPtr()) {
      unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        PointerType *PtrTy =
          PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
                           AMDGPUAS::CONSTANT_ADDRESS);
        MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
        auto MMO = MF.getMachineMemOperand(PtrInfo,
                                           MachineMemOperand::MOLoad |
                                           MachineMemOperand::MOInvariant |
                                           MachineMemOperand::MODereferenceable,
                                           8, 4);
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addImm(0) // offset
          .addImm(0) // glc
          .addImm(0) // dlc
          .addMemOperand(MMO)
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

        MF.getRegInfo().addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
        MBB.addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
      }
    } else {
      unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
        .addExternalSymbol("SCRATCH_RSRC_DWORD0")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
        .addExternalSymbol("SCRATCH_RSRC_DWORD1")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
      .addImm(Rsrc23 & 0xffffffff)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
      .addImm(Rsrc23 >> 32)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  }
}

// Find a scratch register that we can use at the start of the prologue to
// re-align the stack pointer. We avoid using callee-save registers since they
// may appear to be free when this is called from canUseAsPrologue (during
// shrink wrapping), but then no longer be free when this is called from
// emitPrologue.
//
// FIXME: This is a bit conservative, since in the above case we could use one
// of the callee-save registers as a scratch temp to re-align the stack pointer,
// but we would then have to make sure that we were in fact saving at least one
// callee-save register in the prologue, which is additional complexity that
// doesn't seem worth the benefit.
static unsigned findScratchNonCalleeSaveRegister(MachineFunction &MF,
                                                 LivePhysRegs &LiveRegs,
                                                 const TargetRegisterClass &RC) {
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo &TRI = *Subtarget.getRegisterInfo();

  // Mark callee saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(&MF);
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned Reg : RC) {
    if (LiveRegs.available(MRI, Reg))
      return Reg;
  }

  return AMDGPU::NoRegister;
}

bool SIFrameLowering::isSupportedStackID(TargetStackID::Value ID) const {
  switch (ID) {
  case TargetStackID::Default:
  case TargetStackID::NoAlloc:
  case TargetStackID::SGPRSpill:
    return true;
  }
  llvm_unreachable("Invalid TargetStackID::Value");
}
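
// Prologue for non-entry (callable) functions: realign the stack if required,
// establish the frame pointer, bump the stack pointer by the frame size, and
// save the VGPRs used for SGPR spilling with all lanes forced on.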
void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction()) {
    emitEntryFunctionPrologue(MF, MBB);
    return;
  }

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  unsigned FramePtrReg = FuncInfo->getFrameOffsetReg();
  LivePhysRegs LiveRegs;

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL;

  bool HasFP = false;
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = NumBytes;

  if (TRI.needsStackRealignment(MF)) {
    HasFP = true;
    const unsigned Alignment = MFI.getMaxAlignment();

    RoundedSize += Alignment;

    LiveRegs.init(TRI);
    LiveRegs.addLiveIns(MBB);

    unsigned ScratchSPReg
      = findScratchNonCalleeSaveRegister(MF, LiveRegs,
                                         AMDGPU::SReg_32_XM0RegClass);
    assert(ScratchSPReg != AMDGPU::NoRegister);

    // s_add_u32 tmp_reg, s32, NumBytes
    // s_and_b32 s32, tmp_reg, 0b111...0000
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), ScratchSPReg)
      .addReg(StackPtrReg)
      .addImm((Alignment - 1) * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_AND_B32), FramePtrReg)
      .addReg(ScratchSPReg, RegState::Kill)
      .addImm(-Alignment * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
    FuncInfo->setIsStackRealigned(true);
  } else if ((HasFP = hasFP(MF))) {
    // If we need a base pointer, set it up here. It's whatever the value of
    // the stack pointer is at this point. Any variable size objects will be
    // allocated after this, so we can still use the base pointer to reference
    // locals.
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
      .addReg(StackPtrReg)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  if (HasFP && RoundedSize != 0) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(RoundedSize * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
  }

  // To avoid clobbering VGPRs in lanes that weren't active on function entry,
  // turn on all lanes before doing the spill to memory.
  unsigned ScratchExecCopy = AMDGPU::NoRegister;

  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;

    if (ScratchExecCopy == AMDGPU::NoRegister) {
      if (LiveRegs.empty()) {
        LiveRegs.init(TRI);
        LiveRegs.addLiveIns(MBB);
      }

      ScratchExecCopy
        = findScratchNonCalleeSaveRegister(MF, LiveRegs,
                                           *TRI.getWaveMaskRegClass());

      const unsigned OrSaveExec = ST.isWave32() ?
        AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64;
      BuildMI(MBB, MBBI, DL, TII->get(OrSaveExec),
              ScratchExecCopy)
        .addImm(-1);
    }

    TII->storeRegToStackSlot(MBB, MBBI, Reg.VGPR, true,
                             Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
                             &TII->getRegisterInfo());
  }

  if (ScratchExecCopy != AMDGPU::NoRegister) {
    // FIXME: Split block and make terminator.
    unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec)
      .addReg(ScratchExecCopy);
  }
}
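
// Epilogue for non-entry functions: restore the SGPR-spill VGPRs and undo the
// stack pointer adjustment made in the prologue.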
void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  DebugLoc DL;

  unsigned ScratchExecCopy = AMDGPU::NoRegister;
  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    if (ScratchExecCopy == AMDGPU::NoRegister) {
      // See emitPrologue
      LivePhysRegs LiveRegs(*ST.getRegisterInfo());
      LiveRegs.addLiveIns(MBB);

      ScratchExecCopy
        = findScratchNonCalleeSaveRegister(MF, LiveRegs,
                                           *TRI.getWaveMaskRegClass());

      const unsigned OrSaveExec = ST.isWave32() ?
        AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64;

      BuildMI(MBB, MBBI, DL, TII->get(OrSaveExec), ScratchExecCopy)
        .addImm(-1);
    }

    TII->loadRegFromStackSlot(MBB, MBBI, Reg.VGPR,
                              Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
                              &TII->getRegisterInfo());
  }

  if (ScratchExecCopy != AMDGPU::NoRegister) {
    // FIXME: Split block and make terminator.
    unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec)
      .addReg(ScratchExecCopy);
  }

  if (hasFP(MF)) {
    const MachineFrameInfo &MFI = MF.getFrameInfo();
    uint32_t NumBytes = MFI.getStackSize();
    uint32_t RoundedSize = FuncInfo->isStackRealigned() ?
      NumBytes + MFI.getMaxAlignment() : NumBytes;

    const unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_SUB_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(RoundedSize * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameDestroy);
  }
}

static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I))
      return false;
  }

  return true;
}

int SIFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            unsigned &FrameReg) const {
  const SIRegisterInfo *RI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();

  FrameReg = RI->getFrameRegister(MF);
  return MF.getFrameInfo().getObjectOffset(FI);
}
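
// Lower SGPR spills into VGPR lanes where possible, and reserve an emergency
// scavenging slot if any real stack objects remain.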
void SIFrameLowering::processFunctionBeforeFrameFinalized(
  MachineFunction &MF,
  RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasStackObjects())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  bool AllSGPRSpilledToVGPRs = false;

  if (TRI.spillSGPRToVGPR() && FuncInfo->hasSpilledSGPRs()) {
    AllSGPRSpilledToVGPRs = true;

    // Process all SGPR spills before frame offsets are finalized. Ideally SGPRs
    // are spilled to VGPRs, in which case we can eliminate the stack usage.
    //
    // XXX - This operates under the assumption that only other SGPR spills are
    // users of the frame index. I'm not 100% sure this is correct. The
    // StackColoring pass has a comment saying a future improvement would be to
    // merge allocas with spill slots, but for now according to
    // MachineFrameInfo isSpillSlot can't alias any other object.
    for (MachineBasicBlock &MBB : MF) {
      MachineBasicBlock::iterator Next;
      for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
        MachineInstr &MI = *I;
        Next = std::next(I);

        if (TII->isSGPRSpill(MI)) {
          int FI = TII->getNamedOperand(MI, AMDGPU::OpName::addr)->getIndex();
          assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill);
          if (FuncInfo->allocateSGPRSpillToVGPR(MF, FI)) {
            bool Spilled = TRI.eliminateSGPRToVGPRSpillFrameIndex(MI, FI, RS);
            (void)Spilled;
            assert(Spilled && "failed to spill SGPR to VGPR when allocated");
          } else
            AllSGPRSpilledToVGPRs = false;
        }
      }
    }
  }

  FuncInfo->removeSGPRToVGPRFrameIndices(MFI);

  // FIXME: The other checks should be redundant with allStackObjectsAreDead,
  // but currently hasNonSpillStackObjects is set only from source
  // allocas. Stack temps produced from legalization are not counted currently.
  if (FuncInfo->hasNonSpillStackObjects() || FuncInfo->hasSpilledVGPRs() ||
      !AllSGPRSpilledToVGPRs || !allStackObjectsAreDead(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    if (FuncInfo->isEntryFunction()) {
      int ScavengeFI = MFI.CreateFixedObject(
        TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
      RS->addScavengingFrameIndex(ScavengeFI);
    } else {
      int ScavengeFI = MFI.CreateStackObject(
        TRI.getSpillSize(AMDGPU::SGPR_32RegClass),
        TRI.getSpillAlignment(AMDGPU::SGPR_32RegClass),
        false);
      RS->addScavengingFrameIndex(ScavengeFI);
    }
  }
}

void SIFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                           BitVector &SavedRegs,
                                           RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // The SP is specifically managed and we don't want extra spills of it.
  SavedRegs.reset(MFI->getStackPtrOffsetReg());
}

MachineBasicBlock::iterator SIFrameLowering::eliminateCallFramePseudoInstr(
  MachineFunction &MF,
  MachineBasicBlock &MBB,
  MachineBasicBlock::iterator I) const {
  int64_t Amount = I->getOperand(0).getImm();
  if (Amount == 0)
    return MBB.erase(I);

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  if (!TFI->hasReservedCallFrame(MF)) {
    unsigned Align = getStackAlignment();

    Amount = alignTo(Amount, Align);
    assert(isUInt<32>(Amount) && "exceeded stack address space size");
    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    unsigned SPReg = MFI->getStackPtrOffsetReg();

    unsigned Op = IsDestroy ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
    BuildMI(MBB, I, DL, TII->get(Op), SPReg)
      .addReg(SPReg)
      .addImm(Amount * ST.getWavefrontSize());
  } else if (CalleePopAmount != 0) {
    llvm_unreachable("is this used?");
  }

  return MBB.erase(I);
}
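
// Decide whether the function requires a frame pointer. Dynamic stack objects,
// a taken frame address, stack maps/patchpoints, and stack realignment always
// force one; the call-related cases are documented inline below.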
bool SIFrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MFI.hasCalls()) {
    // All offsets are unsigned, so need to be addressed in the same direction
    // as stack growth.
    if (MFI.getStackSize() != 0)
      return true;

    // For the entry point, the input wave scratch offset must be copied to the
    // API SP if there are calls.
    if (MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction())
      return true;

    // Retain behavior of always omitting the FP for leaf functions when
    // possible.
    if (MF.getTarget().Options.DisableFramePointerElim(MF))
      return true;
  }

  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
         MFI.hasStackMap() || MFI.hasPatchPoint() ||
         MF.getSubtarget<GCNSubtarget>().getRegisterInfo()->needsStackRealignment(MF);
}