//===----------------------- SIFrameLowering.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//==-----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "frame-info"

// Find a scratch register that we can use in the prologue. We avoid using
// callee-save registers since they may appear to be free when this is called
// from canUseAsPrologue (during shrink wrapping), but then no longer be free
// when this is called from emitPrologue.
static MCRegister findScratchNonCalleeSaveRegister(MachineRegisterInfo &MRI,
                                                   LivePhysRegs &LiveRegs,
                                                   const TargetRegisterClass &RC,
                                                   bool Unused = false) {
  // Mark callee saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

  if (Unused) {
    // We are looking for a register that can be used throughout the entire
    // function, so any use is unacceptable.
    for (MCRegister Reg : RC) {
      if (!MRI.isPhysRegUsed(Reg) && LiveRegs.available(MRI, Reg))
        return Reg;
    }
  } else {
    for (MCRegister Reg : RC) {
      if (LiveRegs.available(MRI, Reg))
        return Reg;
    }
  }

  return MCRegister();
}
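// Pick a location for saving the incoming FP or BP, in order of preference:
// (1) a free lane in a VGPR already used for SGPR spills, (2) a copy to an
// unused SGPR, (3) a lane in a newly spilled VGPR, or (4) a scratch memory
// slot.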
static void getVGPRSpillLaneOrTempRegister(MachineFunction &MF,
                                           LivePhysRegs &LiveRegs,
                                           Register &TempSGPR,
                                           Optional<int> &FrameIndex,
                                           bool IsFP) {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  // We need to save and restore the current FP/BP.

  // 1: If there is already a VGPR with free lanes, use it. We
  // may already have to pay the penalty for spilling a CSR VGPR.
  if (MFI->haveFreeLanesForSGPRSpill(MF, 1)) {
    int NewFI = FrameInfo.CreateStackObject(4, Align(4), true, nullptr,
                                            TargetStackID::SGPRSpill);

    if (!MFI->allocateSGPRSpillToVGPR(MF, NewFI))
      llvm_unreachable("allocate SGPR spill should have worked");

    FrameIndex = NewFI;

    LLVM_DEBUG(auto Spill = MFI->getSGPRToVGPRSpills(NewFI).front();
               dbgs() << "Spilling " << (IsFP ? "FP" : "BP") << " to "
                      << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane
                      << '\n');
    return;
  }

  // 2: Next, try to save the FP/BP in an unused SGPR.
  TempSGPR = findScratchNonCalleeSaveRegister(
      MF.getRegInfo(), LiveRegs, AMDGPU::SReg_32_XM0_XEXECRegClass, true);

  if (!TempSGPR) {
    int NewFI = FrameInfo.CreateStackObject(4, Align(4), true, nullptr,
                                            TargetStackID::SGPRSpill);

    if (TRI->spillSGPRToVGPR() && MFI->allocateSGPRSpillToVGPR(MF, NewFI)) {
      // 3: There's no free lane to spill, and no free register to save FP/BP,
      // so we're forced to spill another VGPR to use for the spill.
      FrameIndex = NewFI;

      LLVM_DEBUG(
          auto Spill = MFI->getSGPRToVGPRSpills(NewFI).front();
          dbgs() << (IsFP ? "FP" : "BP") << " requires fallback spill to "
                 << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane << '\n';);
    } else {
      // Remove dead <NewFI> index
      MF.getFrameInfo().RemoveStackObject(NewFI);
      // 4: If all else fails, spill the FP/BP to memory.
      FrameIndex = FrameInfo.CreateSpillStackObject(4, Align(4));
      LLVM_DEBUG(dbgs() << "Reserved FI " << FrameIndex << " for spilling "
                        << (IsFP ? "FP" : "BP") << '\n');
    }
  } else {
    LLVM_DEBUG(dbgs() << "Saving " << (IsFP ? "FP" : "BP") << " with copy to "
                      << printReg(TempSGPR, TRI) << '\n');
  }
}

// We need to emit stack operations specially here because a different frame
// register is used than the one getFrameRegister would return for the rest of
// the function.
static void buildPrologSpill(const GCNSubtarget &ST, const SIRegisterInfo &TRI,
                             const SIMachineFunctionInfo &FuncInfo,
                             LivePhysRegs &LiveRegs, MachineFunction &MF,
                             MachineBasicBlock::iterator I, Register SpillReg,
                             int FI) {
  unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
                                        : AMDGPU::BUFFER_STORE_DWORD_OFFSET;

  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FI),
      FrameInfo.getObjectAlign(FI));
  LiveRegs.addReg(SpillReg);
  TRI.buildSpillLoadStore(I, Opc, FI, SpillReg, true,
                          FuncInfo.getStackPtrOffsetReg(), 0, MMO, nullptr,
                          &LiveRegs);
  LiveRegs.removeReg(SpillReg);
}

static void buildEpilogRestore(const GCNSubtarget &ST,
                               const SIRegisterInfo &TRI,
                               const SIMachineFunctionInfo &FuncInfo,
                               LivePhysRegs &LiveRegs, MachineFunction &MF,
                               MachineBasicBlock::iterator I, Register SpillReg,
                               int FI) {
  unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
                                        : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;

  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FI),
      FrameInfo.getObjectAlign(FI));
  TRI.buildSpillLoadStore(I, Opc, FI, SpillReg, false,
                          FuncInfo.getStackPtrOffsetReg(), 0, MMO, nullptr,
                          &LiveRegs);
}

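// Materialize the pointer to the GIT (global information table) in TargetReg:
// the low half comes from an SGPR argument, and the high half from either the
// amdgpu-git-ptr-high function attribute or the upper 32 bits of the PC.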
static void buildGitPtr(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, const SIInstrInfo *TII,
                        Register TargetReg) {
  MachineFunction *MF = MBB.getParent();
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);
  Register TargetLo = TRI->getSubReg(TargetReg, AMDGPU::sub0);
  Register TargetHi = TRI->getSubReg(TargetReg, AMDGPU::sub1);

  if (MFI->getGITPtrHigh() != 0xffffffff) {
    BuildMI(MBB, I, DL, SMovB32, TargetHi)
        .addImm(MFI->getGITPtrHigh())
        .addReg(TargetReg, RegState::ImplicitDefine);
  } else {
    const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64);
    BuildMI(MBB, I, DL, GetPC64, TargetReg);
  }
  Register GitPtrLo = MFI->getGITPtrLoReg(*MF);
  MF->getRegInfo().addLiveIn(GitPtrLo);
  MBB.addLiveIn(GitPtrLo);
  BuildMI(MBB, I, DL, SMovB32, TargetLo)
      .addReg(GitPtrLo);
}

// Emit flat scratch setup code, assuming `MFI->hasFlatScratchInit()`
void SIFrameLowering::emitEntryFunctionFlatScratchInit(
    MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    const DebugLoc &DL, Register ScratchWaveOffsetReg) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // We don't need this if we only have spills since there is no user facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.

  Register FlatScrInitLo;
  Register FlatScrInitHi;

  if (ST.isAmdPalOS()) {
    // Extract the scratch offset from the descriptor in the GIT
    LivePhysRegs LiveRegs;
    LiveRegs.init(*TRI);
    LiveRegs.addLiveIns(MBB);

    // Find unused reg to load flat scratch init into
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register FlatScrInit = AMDGPU::NoRegister;
    ArrayRef<MCPhysReg> AllSGPR64s = TRI->getAllSGPR64(MF);
    unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 1) / 2;
    AllSGPR64s = AllSGPR64s.slice(
        std::min(static_cast<unsigned>(AllSGPR64s.size()), NumPreloaded));
    Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
    for (MCPhysReg Reg : AllSGPR64s) {
      if (LiveRegs.available(MRI, Reg) && MRI.isAllocatable(Reg) &&
          !TRI->isSubRegisterEq(Reg, GITPtrLoReg)) {
        FlatScrInit = Reg;
        break;
      }
    }
    assert(FlatScrInit && "Failed to find free register for scratch init");

    FlatScrInitLo = TRI->getSubReg(FlatScrInit, AMDGPU::sub0);
    FlatScrInitHi = TRI->getSubReg(FlatScrInit, AMDGPU::sub1);

    buildGitPtr(MBB, I, DL, TII, FlatScrInit);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
    const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);
    auto *MMO = MF.getMachineMemOperand(
        PtrInfo,
        MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
            MachineMemOperand::MODereferenceable,
        8, Align(4));
    unsigned Offset =
        MF.getFunction().getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
    unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset);
    BuildMI(MBB, I, DL, LoadDwordX2, FlatScrInit)
        .addReg(FlatScrInit)
        .addImm(EncodedOffset) // offset
        .addImm(0)             // cpol
        .addMemOperand(MMO);

    // Mask the offset in [47:0] of the descriptor
    const MCInstrDesc &SAndB32 = TII->get(AMDGPU::S_AND_B32);
    BuildMI(MBB, I, DL, SAndB32, FlatScrInitHi)
        .addReg(FlatScrInitHi)
        .addImm(0xffff);
  } else {
    Register FlatScratchInitReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);
    assert(FlatScratchInitReg);

    MachineRegisterInfo &MRI = MF.getRegInfo();
    MRI.addLiveIn(FlatScratchInitReg);
    MBB.addLiveIn(FlatScratchInitReg);

    FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
    FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);
  }

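  // From here FLAT_SCRATCH is initialized in one of three generation-specific
  // ways: on GFX10+ the wave offset is added into the init value and the
  // result is written to the FLAT_SCR_LO/HI hardware registers with s_setreg;
  // on GFX9 the sum is written directly into FLAT_SCR_LO/HI; on earlier
  // targets FLAT_SCR_LO receives the scratch size in bytes and FLAT_SCR_HI the
  // wave-adjusted offset converted to 256-byte units.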
  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
          .addReg(FlatScrInitLo)
          .addReg(ScratchWaveOffsetReg);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), FlatScrInitHi)
          .addReg(FlatScrInitHi)
          .addImm(0);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
          .addReg(FlatScrInitLo)
          .addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_LO |
                          (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
          .addReg(FlatScrInitHi)
          .addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_HI |
                          (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
      return;
    }

    // For GFX9.
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
        .addReg(FlatScrInitLo)
        .addReg(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
        .addReg(FlatScrInitHi)
        .addImm(0);

    return;
  }

  assert(ST.getGeneration() < AMDGPUSubtarget::GFX9);

  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitHi, RegState::Kill);

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
      .addReg(FlatScrInitLo, RegState::Kill)
      .addImm(8);
}

// Note SGPRSpill stack IDs should only be used for SGPR spilling to VGPRs, not
// memory. They should have been removed by now.
static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I))
      return false;
  }

  return true;
}

// Shift down registers reserved for the scratch RSRC.
Register SIFrameLowering::getEntryFunctionReservedScratchRsrcReg(
    MachineFunction &MF) const {

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  assert(MFI->isEntryFunction());

  Register ScratchRsrcReg = MFI->getScratchRSrcReg();

  if (!ScratchRsrcReg || (!MRI.isPhysRegUsed(ScratchRsrcReg) &&
                          allStackObjectsAreDead(MF.getFrameInfo())))
    return Register();

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift it down to the end of those
  // which were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. We only
  // cannot do this for the resources required for scratch access. For now we
  // skip over user SGPRs and may leave unused holes.

  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = TRI->getAllSGPR128(MF);
  AllSGPR128s = AllSGPR128s.slice(
      std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed. Also for PAL, make sure we don't clobber
    // the GIT pointer passed in SGPR0 or SGPR8.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) &&
        !TRI->isSubRegisterEq(Reg, GITPtrLoReg)) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

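// Scale factor applied to per-lane byte sizes before they are written into the
// SGPR stack/frame pointers: 1 when flat scratch addressing is used, otherwise
// the wavefront size (the MUBUF scratch offset addresses the whole wave's
// swizzled allocation rather than a single lane's bytes).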
static unsigned getScratchScaleFactor(const GCNSubtarget &ST) {
  return ST.enableFlatScratch() ? 1 : ST.getWavefrontSize();
}

void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
                                                MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  // FIXME: If we only have SGPR spills, we won't actually be using scratch
  // memory since these spill to VGPRs. We should be cleaning up these unused
  // SGPR spill frame indices somewhere.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.

  // FIXME: Remove all of the isPhysRegUsed checks

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();

  assert(MFI->isEntryFunction());

  Register PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
      AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
  // FIXME: Hack to not crash in situations which emitted an error.
  if (!PreloadedScratchWaveOffsetReg)
    return;

  // We need to do the replacement of the private segment buffer register even
  // if there are no stack objects. There could be stores to undef or a
  // constant without an associated object.
  //
  // This will return `Register()` in cases where there are no actual
  // uses of the SRSRC.
  Register ScratchRsrcReg;
  if (!ST.enableFlatScratch())
    ScratchRsrcReg = getEntryFunctionReservedScratchRsrcReg(MF);

  // Make the selected register live throughout the function.
  if (ScratchRsrcReg) {
    for (MachineBasicBlock &OtherBB : MF) {
      if (&OtherBB != &MBB) {
        OtherBB.addLiveIn(ScratchRsrcReg);
      }
    }
  }

  // Now that we have fixed the reserved SRSRC we need to locate the
  // (potentially) preloaded SRSRC.
  Register PreloadedScratchRsrcReg;
  if (ST.isAmdHsaOrMesa(F)) {
    PreloadedScratchRsrcReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
    if (ScratchRsrcReg && PreloadedScratchRsrcReg) {
      // We added live-ins during argument lowering, but since they were not
      // used they were deleted. We're adding the uses now, so add them back.
      MRI.addLiveIn(PreloadedScratchRsrcReg);
      MBB.addLiveIn(PreloadedScratchRsrcReg);
    }
  }

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // We found the SRSRC first because it needs four registers and has an
  // alignment requirement. If the SRSRC that we found clobbers the scratch
  // wave offset, which may be in a fixed SGPR or in a free SGPR chosen by
  // SITargetLowering::allocateSystemSGPRs, COPY the scratch wave offset to a
  // free SGPR.
  Register ScratchWaveOffsetReg;
  if (TRI->isSubRegisterEq(ScratchRsrcReg, PreloadedScratchWaveOffsetReg)) {
    ArrayRef<MCPhysReg> AllSGPRs = TRI->getAllSGPR32(MF);
    unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();
    AllSGPRs = AllSGPRs.slice(
        std::min(static_cast<unsigned>(AllSGPRs.size()), NumPreloaded));
    Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
    for (MCPhysReg Reg : AllSGPRs) {
      if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) &&
          !TRI->isSubRegisterEq(ScratchRsrcReg, Reg) && GITPtrLoReg != Reg) {
        ScratchWaveOffsetReg = Reg;
        BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
            .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill);
        break;
      }
    }
  } else {
    ScratchWaveOffsetReg = PreloadedScratchWaveOffsetReg;
  }
  assert(ScratchWaveOffsetReg);

  if (requiresStackPointerReference(MF)) {
    Register SPReg = MFI->getStackPtrOffsetReg();
    assert(SPReg != AMDGPU::SP_REG);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), SPReg)
        .addImm(MF.getFrameInfo().getStackSize() * getScratchScaleFactor(ST));
  }

  if (hasFP(MF)) {
    Register FPReg = MFI->getFrameOffsetReg();
    assert(FPReg != AMDGPU::FP_REG);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), FPReg).addImm(0);
  }

  if (MFI->hasFlatScratchInit() || ScratchRsrcReg) {
    MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
    MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
  }

  if (MFI->hasFlatScratchInit()) {
    emitEntryFunctionFlatScratchInit(MF, MBB, I, DL, ScratchWaveOffsetReg);
  }

  if (ScratchRsrcReg) {
    emitEntryFunctionScratchRsrcRegSetup(MF, MBB, I, DL,
                                         PreloadedScratchRsrcReg,
                                         ScratchRsrcReg, ScratchWaveOffsetReg);
  }
}

// Emit scratch RSRC setup code, assuming `ScratchRsrcReg != AMDGPU::NoReg`
void SIFrameLowering::emitEntryFunctionScratchRsrcRegSetup(
    MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    const DebugLoc &DL, Register PreloadedScratchRsrcReg,
    Register ScratchRsrcReg, Register ScratchWaveOffsetReg) const {

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const Function &Fn = MF.getFunction();

  if (ST.isAmdPalOS()) {
    // The pointer to the GIT is formed from the offset passed in and either
    // the amdgpu-git-ptr-high function attribute or the top part of the PC
    Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

    buildGitPtr(MBB, I, DL, TII, Rsrc01);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
    const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
    auto MMO = MF.getMachineMemOperand(PtrInfo,
                                       MachineMemOperand::MOLoad |
                                           MachineMemOperand::MOInvariant |
                                           MachineMemOperand::MODereferenceable,
                                       16, Align(4));
    unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
    unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset);
    BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg)
        .addReg(Rsrc01)
        .addImm(EncodedOffset) // offset
        .addImm(0)             // cpol
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine)
        .addMemOperand(MMO);
  } else if (ST.isMesaGfxShader(Fn) || !PreloadedScratchRsrcReg) {
    assert(!ST.isAmdHsaOrMesa(Fn));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    Register Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    Register Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and setup the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();

    if (MFI->hasImplicitBufferPtr()) {
      Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
            .addReg(MFI->getImplicitBufferPtrUserSGPR())
            .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
        auto MMO = MF.getMachineMemOperand(
            PtrInfo,
            MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
                MachineMemOperand::MODereferenceable,
            8, Align(4));
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
            .addReg(MFI->getImplicitBufferPtrUserSGPR())
            .addImm(0) // offset
            .addImm(0) // cpol
            .addMemOperand(MMO)
            .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

        MF.getRegInfo().addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
        MBB.addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
      }
    } else {
      Register Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      Register Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
          .addExternalSymbol("SCRATCH_RSRC_DWORD0")
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
          .addExternalSymbol("SCRATCH_RSRC_DWORD1")
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
        .addImm(Rsrc23 & 0xffffffff)
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
        .addImm(Rsrc23 >> 32)
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  } else if (ST.isAmdHsaOrMesa(Fn)) {
    assert(PreloadedScratchRsrcReg);

    if (ScratchRsrcReg != PreloadedScratchRsrcReg) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
          .addReg(PreloadedScratchRsrcReg, RegState::Kill);
    }
  }

  // Add the scratch wave offset into the scratch RSRC.
  //
  // We only want to update the first 48 bits, which is the base address
  // pointer, without touching the adjacent 16 bits of flags. We know this add
  // cannot carry-out from bit 47, otherwise the scratch allocation would be
  // impossible to fit in the 48-bit global address space.
  //
  // TODO: Evaluate if it is better to just construct an SRD using the flat
  // scratch init and some constants rather than update the one we are passed.
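  //
  // The update emitted below is, in effect:
  //   s_add_u32  rsrc.sub0, rsrc.sub0, scratch_wave_offset
  //   s_addc_u32 rsrc.sub1, rsrc.sub1, 0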
  Register ScratchRsrcSub0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
  Register ScratchRsrcSub1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

  // We cannot Kill ScratchWaveOffsetReg here because we allow it to be used in
  // the kernel body via inreg arguments.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), ScratchRsrcSub0)
      .addReg(ScratchRsrcSub0)
      .addReg(ScratchWaveOffsetReg)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), ScratchRsrcSub1)
      .addReg(ScratchRsrcSub1)
      .addImm(0)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
}

bool SIFrameLowering::isSupportedStackID(TargetStackID::Value ID) const {
  switch (ID) {
  case TargetStackID::Default:
  case TargetStackID::NoAlloc:
  case TargetStackID::SGPRSpill:
    return true;
  case TargetStackID::ScalableVector:
    return false;
  }
  llvm_unreachable("Invalid TargetStackID::Value");
}

static void initLiveRegs(LivePhysRegs &LiveRegs, const SIRegisterInfo &TRI,
                         const SIMachineFunctionInfo *FuncInfo,
                         MachineFunction &MF, MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI, bool IsProlog) {
  if (LiveRegs.empty()) {
    LiveRegs.init(TRI);
    if (IsProlog) {
      LiveRegs.addLiveIns(MBB);
    } else {
      // In epilog.
      LiveRegs.addLiveOuts(MBB);
      LiveRegs.stepBackward(*MBBI);
    }
  }
}

// Activate all lanes, returns saved exec.
static Register buildScratchExecCopy(LivePhysRegs &LiveRegs,
                                     MachineFunction &MF,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     bool IsProlog) {
  Register ScratchExecCopy;
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  DebugLoc DL;

  initLiveRegs(LiveRegs, TRI, FuncInfo, MF, MBB, MBBI, IsProlog);

  ScratchExecCopy = findScratchNonCalleeSaveRegister(
      MRI, LiveRegs, *TRI.getWaveMaskRegClass());
  if (!ScratchExecCopy)
    report_fatal_error("failed to find free scratch register");

  LiveRegs.addReg(ScratchExecCopy);

  const unsigned OrSaveExec =
      ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64;
  BuildMI(MBB, MBBI, DL, TII->get(OrSaveExec), ScratchExecCopy).addImm(-1);

  return ScratchExecCopy;
}

// A StackID of SGPRSpill implies that this is a spill from SGPR to VGPR.
// Otherwise we are spilling to memory.
static bool spilledToMemory(const MachineFunction &MF, int SaveIndex) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return MFI.getStackID(SaveIndex) != TargetStackID::SGPRSpill;
}

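// The non-entry prologue below proceeds in stages: VGPRs used for SGPR
// spilling and for whole wave mode are first saved to scratch with all lanes
// forced on, exec is then restored, the incoming FP/BP values are saved (to
// memory, to a VGPR lane, or by copy to a free SGPR), and finally the FP, BP
// and SP for the current frame are established.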
void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction()) {
    emitEntryFunctionPrologue(MF, MBB);
    return;
  }

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  Register StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  Register FramePtrReg = FuncInfo->getFrameOffsetReg();
  Register BasePtrReg =
      TRI.hasBasePointer(MF) ? TRI.getBaseRegister() : Register();
  LivePhysRegs LiveRegs;

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL;

  bool HasFP = false;
  bool HasBP = false;
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = NumBytes;
  // To avoid clobbering VGPRs in lanes that weren't active on function entry,
  // turn on all lanes before doing the spill to memory.
  Register ScratchExecCopy;

  Optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex;
  Optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex;

  // VGPRs used for SGPR->VGPR spills
  for (const SIMachineFunctionInfo::SGPRSpillVGPR &Reg :
       FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI)
      continue;

    if (!ScratchExecCopy)
      ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI,
                                             /*IsProlog*/ true);

    buildPrologSpill(ST, TRI, *FuncInfo, LiveRegs, MF, MBBI, Reg.VGPR, *Reg.FI);
  }

  // VGPRs used for Whole Wave Mode
  for (const auto &Reg : FuncInfo->WWMReservedRegs) {
    auto VGPR = Reg.first;
    auto FI = Reg.second;
    if (!FI)
      continue;

    if (!ScratchExecCopy)
      ScratchExecCopy =
          buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, /*IsProlog*/ true);

    buildPrologSpill(ST, TRI, *FuncInfo, LiveRegs, MF, MBBI, VGPR, *FI);
  }

  if (ScratchExecCopy) {
    // FIXME: Split block and make terminator.
    unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    MCRegister Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec)
        .addReg(ScratchExecCopy, RegState::Kill);
    LiveRegs.addReg(ScratchExecCopy);
  }

  if (FPSaveIndex && spilledToMemory(MF, *FPSaveIndex)) {
    const int FramePtrFI = *FPSaveIndex;
    assert(!MFI.isDeadObjectIndex(FramePtrFI));

    initLiveRegs(LiveRegs, TRI, FuncInfo, MF, MBB, MBBI, /*IsProlog*/ true);

    MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister(
        MRI, LiveRegs, AMDGPU::VGPR_32RegClass);
    if (!TmpVGPR)
      report_fatal_error("failed to find free scratch register");

    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
        .addReg(FramePtrReg);

    buildPrologSpill(ST, TRI, *FuncInfo, LiveRegs, MF, MBBI, TmpVGPR,
                     FramePtrFI);
  }

  if (BPSaveIndex && spilledToMemory(MF, *BPSaveIndex)) {
    const int BasePtrFI = *BPSaveIndex;
    assert(!MFI.isDeadObjectIndex(BasePtrFI));

    initLiveRegs(LiveRegs, TRI, FuncInfo, MF, MBB, MBBI, /*IsProlog*/ true);

    MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister(
        MRI, LiveRegs, AMDGPU::VGPR_32RegClass);
    if (!TmpVGPR)
      report_fatal_error("failed to find free scratch register");

    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
        .addReg(BasePtrReg);

    buildPrologSpill(ST, TRI, *FuncInfo, LiveRegs, MF, MBBI, TmpVGPR,
                     BasePtrFI);
  }

  // In this case, spill the FP to a reserved VGPR.
  if (FPSaveIndex && !spilledToMemory(MF, *FPSaveIndex)) {
    const int FramePtrFI = *FPSaveIndex;
    assert(!MFI.isDeadObjectIndex(FramePtrFI));

    assert(MFI.getStackID(FramePtrFI) == TargetStackID::SGPRSpill);
    ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill =
        FuncInfo->getSGPRToVGPRSpills(FramePtrFI);
    assert(Spill.size() == 1);

    // Save FP before setting it up.
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill[0].VGPR)
        .addReg(FramePtrReg)
        .addImm(Spill[0].Lane)
        .addReg(Spill[0].VGPR, RegState::Undef);
  }

  // In this case, spill the BP to a reserved VGPR.
  if (BPSaveIndex && !spilledToMemory(MF, *BPSaveIndex)) {
    const int BasePtrFI = *BPSaveIndex;
    assert(!MFI.isDeadObjectIndex(BasePtrFI));

    assert(MFI.getStackID(BasePtrFI) == TargetStackID::SGPRSpill);
    ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill =
        FuncInfo->getSGPRToVGPRSpills(BasePtrFI);
    assert(Spill.size() == 1);

    // Save BP before setting it up.
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill[0].VGPR)
        .addReg(BasePtrReg)
        .addImm(Spill[0].Lane)
        .addReg(Spill[0].VGPR, RegState::Undef);
  }

  // Emit the copy if we need an FP, and are using a free SGPR to save it.
  if (FuncInfo->SGPRForFPSaveRestoreCopy) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY),
            FuncInfo->SGPRForFPSaveRestoreCopy)
        .addReg(FramePtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Emit the copy if we need a BP, and are using a free SGPR to save it.
  if (FuncInfo->SGPRForBPSaveRestoreCopy) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY),
            FuncInfo->SGPRForBPSaveRestoreCopy)
        .addReg(BasePtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // If a copy has been emitted for FP and/or BP, make the SGPRs
  // used in the copy instructions live throughout the function.
  SmallVector<MCPhysReg, 2> TempSGPRs;
  if (FuncInfo->SGPRForFPSaveRestoreCopy)
    TempSGPRs.push_back(FuncInfo->SGPRForFPSaveRestoreCopy);

  if (FuncInfo->SGPRForBPSaveRestoreCopy)
    TempSGPRs.push_back(FuncInfo->SGPRForBPSaveRestoreCopy);

  if (!TempSGPRs.empty()) {
    for (MachineBasicBlock &MBB : MF) {
      for (MCPhysReg Reg : TempSGPRs)
        MBB.addLiveIn(Reg);

      MBB.sortUniqueLiveIns();
    }
    if (!LiveRegs.empty()) {
      LiveRegs.addReg(FuncInfo->SGPRForFPSaveRestoreCopy);
      LiveRegs.addReg(FuncInfo->SGPRForBPSaveRestoreCopy);
    }
  }

  if (TRI.hasStackRealignment(MF)) {
    HasFP = true;
    const unsigned Alignment = MFI.getMaxAlign().value();

    RoundedSize += Alignment;
    if (LiveRegs.empty()) {
      LiveRegs.init(TRI);
      LiveRegs.addLiveIns(MBB);
    }

    // s_add_u32 s33, s32, NumBytes
    // s_and_b32 s33, s33, 0b111...0000
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), FramePtrReg)
        .addReg(StackPtrReg)
        .addImm((Alignment - 1) * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_AND_B32), FramePtrReg)
        .addReg(FramePtrReg, RegState::Kill)
        .addImm(-Alignment * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
    FuncInfo->setIsStackRealigned(true);
  } else if ((HasFP = hasFP(MF))) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
        .addReg(StackPtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // If we need a base pointer, set it up here. It's whatever the value of
  // the stack pointer is at this point. Any variable size objects will be
  // allocated after this, so we can still use the base pointer to reference
  // the incoming arguments.
  if ((HasBP = TRI.hasBasePointer(MF))) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), BasePtrReg)
        .addReg(StackPtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  if (HasFP && RoundedSize != 0) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), StackPtrReg)
        .addReg(StackPtrReg)
        .addImm(RoundedSize * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
  }

  assert((!HasFP || (FuncInfo->SGPRForFPSaveRestoreCopy ||
                     FuncInfo->FramePointerSaveIndex)) &&
         "Needed to save FP but didn't save it anywhere");

  assert((HasFP || (!FuncInfo->SGPRForFPSaveRestoreCopy &&
                    !FuncInfo->FramePointerSaveIndex)) &&
         "Saved FP but didn't need it");

  assert((!HasBP || (FuncInfo->SGPRForBPSaveRestoreCopy ||
                     FuncInfo->BasePointerSaveIndex)) &&
         "Needed to save BP but didn't save it anywhere");

  assert((HasBP || (!FuncInfo->SGPRForBPSaveRestoreCopy &&
                    !FuncInfo->BasePointerSaveIndex)) &&
         "Saved BP but didn't need it");
}

void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  LivePhysRegs LiveRegs;
  DebugLoc DL;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = FuncInfo->isStackRealigned()
                             ? NumBytes + MFI.getMaxAlign().value()
                             : NumBytes;
  const Register StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  const Register FramePtrReg = FuncInfo->getFrameOffsetReg();
  const Register BasePtrReg =
      TRI.hasBasePointer(MF) ? TRI.getBaseRegister() : Register();

  Optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex;
  Optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex;

  if (RoundedSize != 0 && hasFP(MF)) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_SUB_U32), StackPtrReg)
        .addReg(StackPtrReg)
        .addImm(RoundedSize * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameDestroy);
  }

  if (FuncInfo->SGPRForFPSaveRestoreCopy) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
        .addReg(FuncInfo->SGPRForFPSaveRestoreCopy)
        .setMIFlag(MachineInstr::FrameDestroy);
  }

  if (FuncInfo->SGPRForBPSaveRestoreCopy) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), BasePtrReg)
        .addReg(FuncInfo->SGPRForBPSaveRestoreCopy)
        .setMIFlag(MachineInstr::FrameDestroy);
  }

  if (FPSaveIndex) {
    const int FramePtrFI = *FPSaveIndex;
    assert(!MFI.isDeadObjectIndex(FramePtrFI));
    if (spilledToMemory(MF, FramePtrFI)) {
      initLiveRegs(LiveRegs, TRI, FuncInfo, MF, MBB, MBBI, /*IsProlog*/ false);

      MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister(
          MRI, LiveRegs, AMDGPU::VGPR_32RegClass);
      if (!TmpVGPR)
        report_fatal_error("failed to find free scratch register");
      buildEpilogRestore(ST, TRI, *FuncInfo, LiveRegs, MF, MBBI, TmpVGPR,
                         FramePtrFI);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), FramePtrReg)
          .addReg(TmpVGPR, RegState::Kill);
    } else {
      // Reload from VGPR spill.
      assert(MFI.getStackID(FramePtrFI) == TargetStackID::SGPRSpill);
      ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill =
          FuncInfo->getSGPRToVGPRSpills(FramePtrFI);
      assert(Spill.size() == 1);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READLANE_B32), FramePtrReg)
          .addReg(Spill[0].VGPR)
          .addImm(Spill[0].Lane);
    }
  }

  if (BPSaveIndex) {
    const int BasePtrFI = *BPSaveIndex;
    assert(!MFI.isDeadObjectIndex(BasePtrFI));
    if (spilledToMemory(MF, BasePtrFI)) {
      initLiveRegs(LiveRegs, TRI, FuncInfo, MF, MBB, MBBI, /*IsProlog*/ false);

      MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister(
          MRI, LiveRegs, AMDGPU::VGPR_32RegClass);
      if (!TmpVGPR)
        report_fatal_error("failed to find free scratch register");
      buildEpilogRestore(ST, TRI, *FuncInfo, LiveRegs, MF, MBBI, TmpVGPR,
                         BasePtrFI);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), BasePtrReg)
          .addReg(TmpVGPR, RegState::Kill);
    } else {
      // Reload from VGPR spill.
      assert(MFI.getStackID(BasePtrFI) == TargetStackID::SGPRSpill);
      ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill =
          FuncInfo->getSGPRToVGPRSpills(BasePtrFI);
      assert(Spill.size() == 1);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READLANE_B32), BasePtrReg)
          .addReg(Spill[0].VGPR)
          .addImm(Spill[0].Lane);
    }
  }

  Register ScratchExecCopy;
  for (const SIMachineFunctionInfo::SGPRSpillVGPR &Reg :
       FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI)
      continue;

    if (!ScratchExecCopy)
      ScratchExecCopy =
          buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, /*IsProlog*/ false);

    buildEpilogRestore(ST, TRI, *FuncInfo, LiveRegs, MF, MBBI, Reg.VGPR,
                       *Reg.FI);
  }

  for (const auto &Reg : FuncInfo->WWMReservedRegs) {
    auto VGPR = Reg.first;
    auto FI = Reg.second;
    if (!FI)
      continue;

    if (!ScratchExecCopy)
      ScratchExecCopy =
          buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, /*IsProlog*/ false);

    buildEpilogRestore(ST, TRI, *FuncInfo, LiveRegs, MF, MBBI, VGPR, *FI);
  }

  if (ScratchExecCopy) {
    // FIXME: Split block and make terminator.
    unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    MCRegister Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec)
        .addReg(ScratchExecCopy, RegState::Kill);
  }
}

#ifndef NDEBUG
static bool allSGPRSpillsAreDead(const MachineFunction &MF) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I) &&
        MFI.getStackID(I) == TargetStackID::SGPRSpill &&
        (I != FuncInfo->FramePointerSaveIndex &&
         I != FuncInfo->BasePointerSaveIndex)) {
      return false;
    }
  }

  return true;
}
#endif

StackOffset SIFrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                    int FI,
                                                    Register &FrameReg) const {
  const SIRegisterInfo *RI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();

  FrameReg = RI->getFrameRegister(MF);
  return StackOffset::getFixed(MF.getFrameInfo().getObjectOffset(FI));
}

void SIFrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF,
    RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();

  FuncInfo->removeDeadFrameIndices(MFI);
  assert(allSGPRSpillsAreDead(MF) &&
         "SGPR spill should have been removed in SILowerSGPRSpills");

  // FIXME: The other checks should be redundant with allStackObjectsAreDead,
  // but currently hasNonSpillStackObjects is set only from source
  // allocas. Stack temps produced from legalization are not counted currently.
  if (!allStackObjectsAreDead(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    // Add an emergency spill slot
    RS->addScavengingFrameIndex(FuncInfo->getScavengeFI(MFI, *TRI));
  }
}

// Only report VGPRs to generic code.
void SIFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                           BitVector &SavedVGPRs,
                                           RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedVGPRs, RS);
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  if (MFI->isEntryFunction())
    return;

  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  // Ignore the SGPRs the default implementation found.
  SavedVGPRs.clearBitsNotInMask(TRI->getAllVectorRegMask());

  // Do not save AGPRs prior to GFX90A because there was no easy way to do so.
  // In gfx908 there are no direct AGPR loads and stores, so spilling them also
  // requires a temporary VGPR.
  if (!ST.hasGFX90AInsts())
    SavedVGPRs.clearBitsInMask(TRI->getAllAGPRRegMask());

  // hasFP only knows about stack objects that already exist. We're now
  // determining the stack slots that will be created, so we have to predict
  // them. Stack objects force FP usage with calls.
  //
  // Note a new VGPR CSR may be introduced if one is used for the spill, but we
  // don't want to report it here.
  //
  // FIXME: Is this really hasReservedCallFrame?
  const bool WillHaveFP =
      FrameInfo.hasCalls() &&
      (SavedVGPRs.any() || !allStackObjectsAreDead(FrameInfo));

  // VGPRs used for SGPR spilling need to be specially inserted in the prolog,
  // so don't allow the default insertion to handle them.
  for (auto SSpill : MFI->getSGPRSpillVGPRs())
    SavedVGPRs.reset(SSpill.VGPR);

  LivePhysRegs LiveRegs;
  LiveRegs.init(*TRI);

  if (WillHaveFP || hasFP(MF)) {
    assert(!MFI->SGPRForFPSaveRestoreCopy && !MFI->FramePointerSaveIndex &&
           "Re-reserving spill slot for FP");
    getVGPRSpillLaneOrTempRegister(MF, LiveRegs, MFI->SGPRForFPSaveRestoreCopy,
                                   MFI->FramePointerSaveIndex, true);
  }

  if (TRI->hasBasePointer(MF)) {
    if (MFI->SGPRForFPSaveRestoreCopy)
      LiveRegs.addReg(MFI->SGPRForFPSaveRestoreCopy);

    assert(!MFI->SGPRForBPSaveRestoreCopy &&
           !MFI->BasePointerSaveIndex && "Re-reserving spill slot for BP");
    getVGPRSpillLaneOrTempRegister(MF, LiveRegs, MFI->SGPRForBPSaveRestoreCopy,
                                   MFI->BasePointerSaveIndex, false);
  }
}

void SIFrameLowering::determineCalleeSavesSGPR(MachineFunction &MF,
                                               BitVector &SavedRegs,
                                               RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  if (MFI->isEntryFunction())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  // The SP is specifically managed and we don't want extra spills of it.
  SavedRegs.reset(MFI->getStackPtrOffsetReg());

  const BitVector AllSavedRegs = SavedRegs;
  SavedRegs.clearBitsInMask(TRI->getAllVectorRegMask());

  // If clearing VGPRs changed the mask, we will have some CSR VGPR spills.
  const bool HaveAnyCSRVGPR = SavedRegs != AllSavedRegs;

  // We have to anticipate introducing CSR VGPR spills if we don't have any
  // stack objects already, since we require an FP if there is a call and a
  // stack.
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  const bool WillHaveFP = FrameInfo.hasCalls() && HaveAnyCSRVGPR;

  // FP will be specially managed like SP.
  if (WillHaveFP || hasFP(MF))
    SavedRegs.reset(MFI->getFrameOffsetReg());
}

bool SIFrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return true; // Early exit if no callee saved registers are modified!

  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (!FuncInfo->SGPRForFPSaveRestoreCopy &&
      !FuncInfo->SGPRForBPSaveRestoreCopy)
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *RI = ST.getRegisterInfo();
  Register FramePtrReg = FuncInfo->getFrameOffsetReg();
  Register BasePtrReg = RI->getBaseRegister();
  unsigned NumModifiedRegs = 0;

  if (FuncInfo->SGPRForFPSaveRestoreCopy)
    NumModifiedRegs++;
  if (FuncInfo->SGPRForBPSaveRestoreCopy)
    NumModifiedRegs++;

  for (auto &CS : CSI) {
    if (CS.getReg() == FramePtrReg && FuncInfo->SGPRForFPSaveRestoreCopy) {
      CS.setDstReg(FuncInfo->SGPRForFPSaveRestoreCopy);
      if (--NumModifiedRegs)
        break;
    } else if (CS.getReg() == BasePtrReg &&
               FuncInfo->SGPRForBPSaveRestoreCopy) {
      CS.setDstReg(FuncInfo->SGPRForBPSaveRestoreCopy);
      if (--NumModifiedRegs)
        break;
    }
  }

  return false;
}

MachineBasicBlock::iterator SIFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF,
    MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  int64_t Amount = I->getOperand(0).getImm();
  if (Amount == 0)
    return MBB.erase(I);

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  if (!hasReservedCallFrame(MF)) {
    Amount = alignTo(Amount, getStackAlign());
    assert(isUInt<32>(Amount) && "exceeded stack address space size");
    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Register SPReg = MFI->getStackPtrOffsetReg();

    unsigned Op = IsDestroy ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
    BuildMI(MBB, I, DL, TII->get(Op), SPReg)
        .addReg(SPReg)
        .addImm(Amount * getScratchScaleFactor(ST));
  } else if (CalleePopAmount != 0) {
    llvm_unreachable("is this used?");
  }

  return MBB.erase(I);
}

/// Returns true if the frame will require a reference to the stack pointer.
///
/// This is the set of conditions common to setting up the stack pointer in a
/// kernel, and for using a frame pointer in a callable function.
///
/// FIXME: Should also check hasOpaqueSPAdjustment and if any inline asm
/// references SP.
static bool frameTriviallyRequiresSP(const MachineFrameInfo &MFI) {
  return MFI.hasVarSizedObjects() || MFI.hasStackMap() || MFI.hasPatchPoint();
}

// The FP for kernels is always known 0, so we never really need to setup an
// explicit register for it. However, DisableFramePointerElim will force us to
// use a register for it.
bool SIFrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // For entry functions we can use an immediate offset in most cases, so the
  // presence of calls doesn't imply we need a distinct frame pointer.
  if (MFI.hasCalls() &&
      !MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
    // All offsets are unsigned, so need to be addressed in the same direction
    // as stack growth.

    // FIXME: This function is pretty broken, since it can be called before the
    // frame layout is determined or CSR spills are inserted.
    return MFI.getStackSize() != 0;
  }

  return frameTriviallyRequiresSP(MFI) || MFI.isFrameAddressTaken() ||
         MF.getSubtarget<GCNSubtarget>().getRegisterInfo()->hasStackRealignment(
             MF) ||
         MF.getTarget().Options.DisableFramePointerElim(MF);
}

// This is essentially a reduced version of hasFP for entry functions. Since the
// stack pointer is known 0 on entry to kernels, we never really need an FP
// register. We may need to initialize the stack pointer depending on the frame
// properties, which logically overlaps many of the cases where an ordinary
// function would require an FP.
bool SIFrameLowering::requiresStackPointerReference(
    const MachineFunction &MF) const {
  // Callable functions always require a stack pointer reference.
  assert(MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction() &&
         "only expected to call this for entry points");

  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Entry points ordinarily don't need to initialize SP. We have to set it up
  // for callees if there are any. Also note tail calls are impossible/don't
  // make any sense for kernels.
  if (MFI.hasCalls())
    return true;

  // We still need to initialize the SP if we're doing anything weird that
  // references the SP, like variable sized stack objects.
  return frameTriviallyRequiresSP(MFI);
}