//===----------------------- SIFrameLowering.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//==-----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"

using namespace llvm;


static ArrayRef<MCPhysReg> getAllSGPR128(const GCNSubtarget &ST,
                                         const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
                      ST.getMaxNumSGPRs(MF) / 4);
}

static ArrayRef<MCPhysReg> getAllSGPRs(const GCNSubtarget &ST,
                                       const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(),
                      ST.getMaxNumSGPRs(MF));
}

void SIFrameLowering::emitFlatScratchInit(const GCNSubtarget &ST,
                                          MachineFunction &MF,
                                          MachineBasicBlock &MBB) const {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo* TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // We don't need this if we only have spills since there is no user facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  unsigned FlatScratchInitReg
    = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);

  MachineRegisterInfo &MRI = MF.getRegInfo();
  MRI.addLiveIn(FlatScratchInitReg);
  MBB.addLiveIn(FlatScratchInitReg);

  unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
  unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
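      // flat_scratch is a 64-bit pointer on these targets. Add the wave
      // offset to the incoming FLAT_SCRATCH_INIT value and write the result
      // to the flat scratch hardware registers, roughly:
      //   s_add_u32    lo, lo, <wave offset>
      //   s_addc_u32   hi, hi, 0
      //   s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO, 0, 32), lo
      //   s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI, 0, 32), hi
      // The s_setreg immediates pack the hardware register ID together with a
      // WIDTH_M1 field of 31 so that all 32 bits of the register are written.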
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
        .addReg(FlatScrInitLo)
        .addReg(ScratchWaveOffsetReg);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), FlatScrInitHi)
        .addReg(FlatScrInitHi)
        .addImm(0);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32)).
        addReg(FlatScrInitLo).
        addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_LO |
                       (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32)).
        addReg(FlatScrInitHi).
        addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_HI |
                       (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
      return;
    }

    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
      .addReg(FlatScrInitHi)
      .addImm(0);

    return;
  }

  assert(ST.getGeneration() < AMDGPUSubtarget::GFX10);

  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
    .addReg(FlatScrInitHi, RegState::Kill);

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
    .addReg(FlatScrInitLo)
    .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
    .addReg(FlatScrInitLo, RegState::Kill)
    .addImm(8);
}

unsigned SIFrameLowering::getReservedPrivateSegmentBufferReg(
  const GCNSubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // We need to insert initialization of the scratch resource descriptor.
  unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
  if (ScratchRsrcReg == AMDGPU::NoRegister ||
      !MRI.isPhysRegUsed(ScratchRsrcReg))
    return AMDGPU::NoRegister;

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift it down to the end of those
  // which were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. We only
  // cannot do this for the resources required for scratch access. For now we
  // skip over user SGPRs and may leave unused holes.

  // We find the resource first because it has an alignment requirement.

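  // getAllSGPR128() returns 128-bit tuples, each covering four consecutive
  // SGPR_32 registers, so the preloaded SGPR count is rounded up to a
  // multiple of four before deciding how many tuples to skip.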
  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = getAllSGPR128(ST, MF);
  AllSGPR128s = AllSGPR128s.slice(std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

// Shift down registers reserved for the scratch wave offset and stack pointer
// SGPRs.
std::pair<unsigned, unsigned>
SIFrameLowering::getReservedPrivateSegmentWaveByteOffsetReg(
  const GCNSubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // No replacement necessary.
  if (ScratchWaveOffsetReg == AMDGPU::NoRegister ||
      !MRI.isPhysRegUsed(ScratchWaveOffsetReg)) {
    assert(MFI->getStackPtrOffsetReg() == AMDGPU::SP_REG);
    return std::make_pair(AMDGPU::NoRegister, AMDGPU::NoRegister);
  }

  unsigned SPReg = MFI->getStackPtrOffsetReg();
  if (ST.hasSGPRInitBug())
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();

  ArrayRef<MCPhysReg> AllSGPRs = getAllSGPRs(ST, MF);
  if (NumPreloaded > AllSGPRs.size())
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  AllSGPRs = AllSGPRs.slice(NumPreloaded);

  // We need to drop registers from the end of the list that we cannot use
  // for the scratch wave offset.
  // + 2 s102 and s103 do not exist on VI.
  // + 2 for vcc
  // + 2 for xnack_mask
  // + 2 for flat_scratch
  // + 4 for registers reserved for scratch resource register
  // + 1 for register reserved for scratch wave offset.  (By excluding this
  //     register from the list to consider, it means that when this
  //     register is being used for the scratch wave offset and there
  //     are no other free SGPRs, then the value will stay in this register.)
  // + 1 if stack pointer is used.
  // ----
  //  13 (+1)
  unsigned ReservedRegCount = 13;

  if (AllSGPRs.size() < ReservedRegCount)
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  bool HandledScratchWaveOffsetReg =
    ScratchWaveOffsetReg != TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);

  for (MCPhysReg Reg : AllSGPRs.drop_back(ReservedRegCount)) {
    // Pick the first unallocated SGPR. Be careful not to pick an alias of the
    // scratch descriptor, since we haven't added its uses yet.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      if (!HandledScratchWaveOffsetReg) {
        HandledScratchWaveOffsetReg = true;

        MRI.replaceRegWith(ScratchWaveOffsetReg, Reg);
        MFI->setScratchWaveOffsetReg(Reg);
        ScratchWaveOffsetReg = Reg;
        break;
      }
    }
  }

  return std::make_pair(ScratchWaveOffsetReg, SPReg);
}

void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
                                                MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // If we only have SGPR spills, we won't actually be using scratch memory
  // since these spill to VGPRs.
  //
  // FIXME: We should be cleaning up these unused SGPR spill frame indices
  // somewhere.

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();

  // We need to do the replacement of the private segment buffer and wave offset
  // register even if there are no stack objects. There could be stores to undef
  // or a constant without an associated object.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.

  if (MFI->hasFlatScratchInit())
    emitFlatScratchInit(ST, MF, MBB);

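  // MachineFrameInfo reports a per-lane frame size, while the stack pointer
  // for an entry function is a byte offset into the wave's scratch
  // allocation, hence the scaling by the wavefront size whenever a frame size
  // is added to SP below.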
  unsigned SPReg = MFI->getStackPtrOffsetReg();
  if (SPReg != AMDGPU::SP_REG) {
    assert(MRI.isReserved(SPReg) && "SPReg used but not reserved");

    DebugLoc DL;
    const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
    int64_t StackSize = FrameInfo.getStackSize();

    if (StackSize == 0) {
      BuildMI(MBB, MBB.begin(), DL, TII->get(AMDGPU::COPY), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg());
    } else {
      BuildMI(MBB, MBB.begin(), DL, TII->get(AMDGPU::S_ADD_U32), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg())
        .addImm(StackSize * ST.getWavefrontSize());
    }
  }

  unsigned ScratchRsrcReg
    = getReservedPrivateSegmentBufferReg(ST, TII, TRI, MFI, MF);

  unsigned ScratchWaveOffsetReg;
  std::tie(ScratchWaveOffsetReg, SPReg)
    = getReservedPrivateSegmentWaveByteOffsetReg(ST, TII, TRI, MFI, MF);

  // It's possible to have uses of only ScratchWaveOffsetReg without
  // ScratchRsrcReg if it's only used for the initialization of flat_scratch,
  // but the inverse is not true.
  if (ScratchWaveOffsetReg == AMDGPU::NoRegister) {
    assert(ScratchRsrcReg == AMDGPU::NoRegister);
    return;
  }

  // We need to insert initialization of the scratch resource descriptor.
  unsigned PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
    AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);

  unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister;
  if (ST.isAmdHsaOrMesa(F)) {
    PreloadedPrivateBufferReg = MFI->getPreloadedReg(
      AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
  }

  bool OffsetRegUsed = MRI.isPhysRegUsed(ScratchWaveOffsetReg);
  bool ResourceRegUsed = ScratchRsrcReg != AMDGPU::NoRegister &&
                         MRI.isPhysRegUsed(ScratchRsrcReg);

  // We added live-ins during argument lowering, but since they were not used
  // they were deleted. We're adding the uses now, so add them back.
  if (OffsetRegUsed) {
    assert(PreloadedScratchWaveOffsetReg != AMDGPU::NoRegister &&
           "scratch wave offset input is required");
    MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
    MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
  }

  if (ResourceRegUsed && PreloadedPrivateBufferReg != AMDGPU::NoRegister) {
    assert(ST.isAmdHsaOrMesa(F) || ST.isMesaGfxShader(F));
    MRI.addLiveIn(PreloadedPrivateBufferReg);
    MBB.addLiveIn(PreloadedPrivateBufferReg);
  }

  // Make the selected registers live throughout the function.
  for (MachineBasicBlock &OtherBB : MF) {
    if (&OtherBB == &MBB)
      continue;

    if (OffsetRegUsed)
      OtherBB.addLiveIn(ScratchWaveOffsetReg);

    if (ResourceRegUsed)
      OtherBB.addLiveIn(ScratchRsrcReg);
  }

  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // If we reserved the original input registers, we don't need to copy to the
  // reserved registers.

  bool CopyBuffer = ResourceRegUsed &&
    PreloadedPrivateBufferReg != AMDGPU::NoRegister &&
    ST.isAmdHsaOrMesa(F) &&
    ScratchRsrcReg != PreloadedPrivateBufferReg;

  // This needs to be careful of the copying order to avoid overwriting one of
  // the input registers before it's been copied to its final
  // destination. Usually the offset should be copied first.
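  // The exception is when the chosen wave offset register is a sub-register
  // of the preloaded buffer descriptor; copying the offset first would then
  // clobber part of the not-yet-copied descriptor, so in that case the buffer
  // is copied first.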
  bool CopyBufferFirst = TRI->isSubRegisterEq(PreloadedPrivateBufferReg,
                                              ScratchWaveOffsetReg);
  if (CopyBuffer && CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (OffsetRegUsed &&
      PreloadedScratchWaveOffsetReg != ScratchWaveOffsetReg) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
      .addReg(PreloadedScratchWaveOffsetReg,
              MRI.isPhysRegUsed(ScratchWaveOffsetReg) ? 0 : RegState::Kill);
  }

  if (CopyBuffer && !CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (ResourceRegUsed)
    emitEntryFunctionScratchSetup(ST, MF, MBB, MFI, I,
                                  PreloadedPrivateBufferReg, ScratchRsrcReg);
}

// Emit scratch setup code for AMDPAL or Mesa, assuming ResourceRegUsed is set.
void SIFrameLowering::emitEntryFunctionScratchSetup(const GCNSubtarget &ST,
      MachineFunction &MF, MachineBasicBlock &MBB, SIMachineFunctionInfo *MFI,
      MachineBasicBlock::iterator I, unsigned PreloadedPrivateBufferReg,
      unsigned ScratchRsrcReg) const {

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const Function &Fn = MF.getFunction();
  DebugLoc DL;

  if (ST.isAmdPalOS()) {
    // The pointer to the GIT is formed from the offset passed in and either
    // the amdgpu-git-ptr-high function attribute or the top part of the PC
    unsigned RsrcLo = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
    unsigned RsrcHi = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
    unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    if (MFI->getGITPtrHigh() != 0xffffffff) {
      BuildMI(MBB, I, DL, SMovB32, RsrcHi)
        .addImm(MFI->getGITPtrHigh())
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    } else {
      const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64);
      BuildMI(MBB, I, DL, GetPC64, Rsrc01);
    }
    auto GitPtrLo = AMDGPU::SGPR0; // Low GIT address passed in
    if (ST.hasMergedShaders()) {
      switch (MF.getFunction().getCallingConv()) {
        case CallingConv::AMDGPU_HS:
        case CallingConv::AMDGPU_GS:
          // Low GIT address is passed in s8 rather than s0 for an LS+HS or
          // ES+GS merged shader on gfx9+.
          GitPtrLo = AMDGPU::SGPR8;
          break;
        default:
          break;
      }
    }
    MF.getRegInfo().addLiveIn(GitPtrLo);
    MBB.addLiveIn(GitPtrLo);
    BuildMI(MBB, I, DL, SMovB32, RsrcLo)
      .addReg(GitPtrLo)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    PointerType *PtrTy =
      PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
                       AMDGPUAS::CONSTANT_ADDRESS);
    MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
    const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
    auto MMO = MF.getMachineMemOperand(PtrInfo,
                                       MachineMemOperand::MOLoad |
                                       MachineMemOperand::MOInvariant |
                                       MachineMemOperand::MODereferenceable,
                                       16, 4);
    unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
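    // The byte offset has to be converted to the immediate encoding expected
    // by this subtarget's scalar loads (older generations encode SMRD offsets
    // in dword units rather than bytes), which is what getSMRDEncodedOffset
    // handles here.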
    unsigned EncodedOffset = AMDGPU::getSMRDEncodedOffset(Subtarget, Offset);
    BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg)
      .addReg(Rsrc01)
      .addImm(EncodedOffset) // offset
      .addImm(0) // glc
      .addImm(0) // dlc
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine)
      .addMemOperand(MMO);
    return;
  }
  if (ST.isMesaGfxShader(Fn)
      || (PreloadedPrivateBufferReg == AMDGPU::NoRegister)) {
    assert(!ST.isAmdHsaOrMesa(Fn));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and setup the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();

    if (MFI->hasImplicitBufferPtr()) {
      unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        PointerType *PtrTy =
          PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
                           AMDGPUAS::CONSTANT_ADDRESS);
        MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
        auto MMO = MF.getMachineMemOperand(PtrInfo,
                                           MachineMemOperand::MOLoad |
                                           MachineMemOperand::MOInvariant |
                                           MachineMemOperand::MODereferenceable,
                                           8, 4);
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addImm(0) // offset
          .addImm(0) // glc
          .addImm(0) // dlc
          .addMemOperand(MMO)
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

        MF.getRegInfo().addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
        MBB.addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
      }
    } else {
      unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
        .addExternalSymbol("SCRATCH_RSRC_DWORD0")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
        .addExternalSymbol("SCRATCH_RSRC_DWORD1")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
      .addImm(Rsrc23 & 0xffffffff)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
      .addImm(Rsrc23 >> 32)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  }
}

// Find a scratch register that we can use at the start of the prologue to
// re-align the stack pointer. We avoid using callee-save registers since they
// may appear to be free when this is called from canUseAsPrologue (during
// shrink wrapping), but then no longer be free when this is called from
// emitPrologue.
//
// FIXME: This is a bit conservative, since in the above case we could use one
// of the callee-save registers as a scratch temp to re-align the stack pointer,
// but we would then have to make sure that we were in fact saving at least one
// callee-save register in the prologue, which is additional complexity that
// doesn't seem worth the benefit.
static unsigned findScratchNonCalleeSaveRegister(MachineFunction &MF,
                                                 LivePhysRegs &LiveRegs,
                                                 const TargetRegisterClass &RC) {
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo &TRI = *Subtarget.getRegisterInfo();

  // Mark callee saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(&MF);
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned Reg : RC) {
    if (LiveRegs.available(MRI, Reg))
      return Reg;
  }

  return AMDGPU::NoRegister;
}

void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction()) {
    emitEntryFunctionPrologue(MF, MBB);
    return;
  }

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  unsigned FramePtrReg = FuncInfo->getFrameOffsetReg();
  LivePhysRegs LiveRegs;

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL;

  // XXX - Is this the right predicate?

  bool NeedFP = hasFP(MF);
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = NumBytes;
  const bool NeedsRealignment = TRI.needsStackRealignment(MF);

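  // When realignment is needed, over-allocate by the maximum alignment so
  // that rounding the frame pointer up to the alignment boundary below still
  // leaves NumBytes of usable frame.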
  if (NeedsRealignment) {
    assert(NeedFP);
    const unsigned Alignment = MFI.getMaxAlignment();

    RoundedSize += Alignment;

    LiveRegs.init(TRI);
    LiveRegs.addLiveIns(MBB);

    unsigned ScratchSPReg
      = findScratchNonCalleeSaveRegister(MF, LiveRegs,
                                         AMDGPU::SReg_32_XM0RegClass);
    assert(ScratchSPReg != AMDGPU::NoRegister);

    // s_add_u32 tmp_reg, s32, NumBytes
    // s_and_b32 s32, tmp_reg, 0b111...0000
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), ScratchSPReg)
      .addReg(StackPtrReg)
      .addImm((Alignment - 1) * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_AND_B32), FramePtrReg)
      .addReg(ScratchSPReg, RegState::Kill)
      .addImm(-Alignment * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
    FuncInfo->setIsStackRealigned(true);
  } else if (NeedFP) {
    // If we need a base pointer, set it up here. It's whatever the value of
    // the stack pointer is at this point. Any variable size objects will be
    // allocated after this, so we can still use the base pointer to reference
    // locals.
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
      .addReg(StackPtrReg)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  if (RoundedSize != 0 && hasSP(MF)) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(RoundedSize * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
  }

  // To avoid clobbering VGPRs in lanes that weren't active on function entry,
  // turn on all lanes before doing the spill to memory.
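  // s_or_saveexec_b64 with an all-ones operand copies the current exec mask
  // into ScratchExecCopy and enables every lane; exec is restored from the
  // copy once the spill stores have been emitted.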
  unsigned ScratchExecCopy = AMDGPU::NoRegister;

  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;

    if (ScratchExecCopy == AMDGPU::NoRegister) {
      if (LiveRegs.empty()) {
        LiveRegs.init(TRI);
        LiveRegs.addLiveIns(MBB);
      }

      ScratchExecCopy
        = findScratchNonCalleeSaveRegister(MF, LiveRegs,
                                           AMDGPU::SReg_64_XEXECRegClass);

      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64),
              ScratchExecCopy)
        .addImm(-1);
    }

    TII->storeRegToStackSlot(MBB, MBBI, Reg.VGPR, true,
                             Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
                             &TII->getRegisterInfo());
  }

  if (ScratchExecCopy != AMDGPU::NoRegister) {
    // FIXME: Split block and make terminator.
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
      .addReg(ScratchExecCopy);
  }
}

void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  DebugLoc DL;

  unsigned ScratchExecCopy = AMDGPU::NoRegister;
  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;

    if (ScratchExecCopy == AMDGPU::NoRegister) {
      // See emitPrologue
      LivePhysRegs LiveRegs(*ST.getRegisterInfo());
      LiveRegs.addLiveIns(MBB);

      ScratchExecCopy
        = findScratchNonCalleeSaveRegister(MF, LiveRegs,
                                           AMDGPU::SReg_64_XEXECRegClass);

      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64), ScratchExecCopy)
        .addImm(-1);
    }

    TII->loadRegFromStackSlot(MBB, MBBI, Reg.VGPR,
                              Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
                              &TII->getRegisterInfo());
  }

  if (ScratchExecCopy != AMDGPU::NoRegister) {
    // FIXME: Split block and make terminator.
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
      .addReg(ScratchExecCopy);
  }

  unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  if (StackPtrReg == AMDGPU::NoRegister)
    return;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  uint32_t NumBytes = MFI.getStackSize();

  // FIXME: Clarify distinction between no set SP and SP. For callee functions,
  // it's really whether we need SP to be accurate or not.

  if (NumBytes != 0 && hasSP(MF)) {
    uint32_t RoundedSize = FuncInfo->isStackRealigned() ?
      NumBytes + MFI.getMaxAlignment() : NumBytes;

    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_SUB_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(RoundedSize * ST.getWavefrontSize());
  }
}

static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I))
      return false;
  }

  return true;
}

int SIFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            unsigned &FrameReg) const {
  const SIRegisterInfo *RI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();

  FrameReg = RI->getFrameRegister(MF);
  return MF.getFrameInfo().getObjectOffset(FI);
}

void SIFrameLowering::processFunctionBeforeFrameFinalized(
  MachineFunction &MF,
  RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasStackObjects())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  bool AllSGPRSpilledToVGPRs = false;

  if (TRI.spillSGPRToVGPR() && FuncInfo->hasSpilledSGPRs()) {
    AllSGPRSpilledToVGPRs = true;

    // Process all SGPR spills before frame offsets are finalized. Ideally SGPRs
    // are spilled to VGPRs, in which case we can eliminate the stack usage.
    //
    // XXX - This operates under the assumption that only other SGPR spills are
    // users of the frame index. I'm not 100% sure this is correct. The
    // StackColoring pass has a comment saying a future improvement would be to
    // merge allocas with spill slots, but for now according to
    // MachineFrameInfo isSpillSlot can't alias any other object.
    for (MachineBasicBlock &MBB : MF) {
      MachineBasicBlock::iterator Next;
      for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
        MachineInstr &MI = *I;
        Next = std::next(I);

        if (TII->isSGPRSpill(MI)) {
          int FI = TII->getNamedOperand(MI, AMDGPU::OpName::addr)->getIndex();
          assert(MFI.getStackID(FI) == SIStackID::SGPR_SPILL);
          if (FuncInfo->allocateSGPRSpillToVGPR(MF, FI)) {
            bool Spilled = TRI.eliminateSGPRToVGPRSpillFrameIndex(MI, FI, RS);
            (void)Spilled;
            assert(Spilled && "failed to spill SGPR to VGPR when allocated");
          } else
            AllSGPRSpilledToVGPRs = false;
        }
      }
    }
  }

  FuncInfo->removeSGPRToVGPRFrameIndices(MFI);

  // FIXME: The other checks should be redundant with allStackObjectsAreDead,
  // but currently hasNonSpillStackObjects is set only from source
  // allocas. Stack temps produced from legalization are not counted currently.
  if (FuncInfo->hasNonSpillStackObjects() || FuncInfo->hasSpilledVGPRs() ||
      !AllSGPRSpilledToVGPRs || !allStackObjectsAreDead(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    // We force this to be at offset 0 so no user object ever has 0 as an
    // address, so we may use 0 as an invalid pointer value. This is because
    // LLVM assumes 0 is an invalid pointer in address space 0. Because alloca
    // is required to be address space 0, we are forced to accept this for
    // now. Ideally we could have the stack in another address space with 0 as a
    // valid pointer, and -1 as the null value.
    //
    // This will also waste additional space when user stack objects require > 4
    // byte alignment.
    //
    // The main cost here is losing the offset for addressing modes. However
    // this also ensures we shouldn't need a register for the offset when
    // emergency scavenging.
    int ScavengeFI = MFI.CreateFixedObject(
      TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
    RS->addScavengingFrameIndex(ScavengeFI);
  }
}

void SIFrameLowering::determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
                                           RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // The SP is specifically managed and we don't want extra spills of it.
  SavedRegs.reset(MFI->getStackPtrOffsetReg());
}

MachineBasicBlock::iterator SIFrameLowering::eliminateCallFramePseudoInstr(
  MachineFunction &MF,
  MachineBasicBlock &MBB,
  MachineBasicBlock::iterator I) const {
  int64_t Amount = I->getOperand(0).getImm();
  if (Amount == 0)
    return MBB.erase(I);

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  if (!TFI->hasReservedCallFrame(MF)) {
    unsigned Align = getStackAlignment();

    Amount = alignTo(Amount, Align);
    assert(isUInt<32>(Amount) && "exceeded stack address space size");
    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    unsigned SPReg = MFI->getStackPtrOffsetReg();

    unsigned Op = IsDestroy ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
    BuildMI(MBB, I, DL, TII->get(Op), SPReg)
      .addReg(SPReg)
      .addImm(Amount * ST.getWavefrontSize());
  } else if (CalleePopAmount != 0) {
    llvm_unreachable("is this used?");
  }

  return MBB.erase(I);
}

bool SIFrameLowering::hasFP(const MachineFunction &MF) const {
  // All stack operations are relative to the frame offset SGPR.
  // TODO: Still want to eliminate sometimes.
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // XXX - Is this only called after frame is finalized? Should be able to check
  // frame size.
  return MFI.hasStackObjects() && !allStackObjectsAreDead(MFI);
}

bool SIFrameLowering::hasSP(const MachineFunction &MF) const {
  const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
  // All stack operations are relative to the frame offset SGPR.
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return MFI.hasCalls() || MFI.hasVarSizedObjects() || TRI->needsStackRealignment(MF);
}