//===----------------------- SIFrameLowering.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//==-----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"

using namespace llvm;

#define DEBUG_TYPE "frame-info"


// Find a scratch register that we can use at the start of the prologue to
// re-align the stack pointer. We avoid using callee-save registers since they
// may appear to be free when this is called from canUseAsPrologue (during
// shrink wrapping), but then no longer be free when this is called from
// emitPrologue.
//
// FIXME: This is a bit conservative, since in the above case we could use one
// of the callee-save registers as a scratch temp to re-align the stack pointer,
// but we would then have to make sure that we were in fact saving at least one
// callee-save register in the prologue, which is additional complexity that
// doesn't seem worth the benefit.
static MCRegister findScratchNonCalleeSaveRegister(MachineRegisterInfo &MRI,
                                                   LivePhysRegs &LiveRegs,
                                                   const TargetRegisterClass &RC,
                                                   bool Unused = false) {
  // Mark callee saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

  if (Unused) {
    // We are looking for a register that can be used throughout the entire
    // function, so any use is unacceptable.
    for (MCRegister Reg : RC) {
      if (!MRI.isPhysRegUsed(Reg) && LiveRegs.available(MRI, Reg))
        return Reg;
    }
  } else {
    for (MCRegister Reg : RC) {
      if (LiveRegs.available(MRI, Reg))
        return Reg;
    }
  }

  // If an unused register is required, this is called in contexts where
  // failure is an option and the caller has a fallback plan. In other
  // contexts, this must succeed.
  if (!Unused)
    report_fatal_error("failed to find free scratch register");

  return MCRegister();
}

static void getVGPRSpillLaneOrTempRegister(MachineFunction &MF,
                                           LivePhysRegs &LiveRegs,
                                           Register &TempSGPR,
                                           Optional<int> &FrameIndex,
                                           bool IsFP) {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();

#ifndef NDEBUG
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
#endif

  // We need to save and restore the current FP/BP.

  // 1: If there is already a VGPR with free lanes, use it. We
  // may already have to pay the penalty for spilling a CSR VGPR.
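  // (haveFreeLanesForSGPRSpill(MF, 1) asks whether a VGPR that is already
  // reserved for SGPR spilling still has at least one unallocated lane, so
  // reusing it costs nothing extra.)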
87 if (MFI->haveFreeLanesForSGPRSpill(MF, 1)) { 88 int NewFI = FrameInfo.CreateStackObject(4, Align(4), true, nullptr, 89 TargetStackID::SGPRSpill); 90 91 if (!MFI->allocateSGPRSpillToVGPR(MF, NewFI)) 92 llvm_unreachable("allocate SGPR spill should have worked"); 93 94 FrameIndex = NewFI; 95 96 LLVM_DEBUG(auto Spill = MFI->getSGPRToVGPRSpills(NewFI).front(); 97 dbgs() << "Spilling " << (IsFP ? "FP" : "BP") << " to " 98 << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane 99 << '\n'); 100 return; 101 } 102 103 // 2: Next, try to save the FP/BP in an unused SGPR. 104 TempSGPR = findScratchNonCalleeSaveRegister( 105 MF.getRegInfo(), LiveRegs, AMDGPU::SReg_32_XM0_XEXECRegClass, true); 106 107 if (!TempSGPR) { 108 int NewFI = FrameInfo.CreateStackObject(4, Align(4), true, nullptr, 109 TargetStackID::SGPRSpill); 110 111 if (MFI->allocateSGPRSpillToVGPR(MF, NewFI)) { 112 // 3: There's no free lane to spill, and no free register to save FP/BP, 113 // so we're forced to spill another VGPR to use for the spill. 114 FrameIndex = NewFI; 115 116 LLVM_DEBUG( 117 auto Spill = MFI->getSGPRToVGPRSpills(NewFI).front(); 118 dbgs() << (IsFP ? "FP" : "BP") << " requires fallback spill to " 119 << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane << '\n';); 120 } else { 121 // Remove dead <NewFI> index 122 MF.getFrameInfo().RemoveStackObject(NewFI); 123 // 4: If all else fails, spill the FP/BP to memory. 124 FrameIndex = FrameInfo.CreateSpillStackObject(4, Align(4)); 125 LLVM_DEBUG(dbgs() << "Reserved FI " << FrameIndex << " for spilling " 126 << (IsFP ? "FP" : "BP") << '\n'); 127 } 128 } else { 129 LLVM_DEBUG(dbgs() << "Saving " << (IsFP ? "FP" : "BP") << " with copy to " 130 << printReg(TempSGPR, TRI) << '\n'); 131 } 132 } 133 134 // We need to specially emit stack operations here because a different frame 135 // register is used than in the rest of the function, as getFrameRegister would 136 // use. 137 static void buildPrologSpill(const GCNSubtarget &ST, LivePhysRegs &LiveRegs, 138 MachineBasicBlock &MBB, 139 MachineBasicBlock::iterator I, 140 const SIInstrInfo *TII, Register SpillReg, 141 Register ScratchRsrcReg, Register SPReg, int FI) { 142 MachineFunction *MF = MBB.getParent(); 143 MachineFrameInfo &MFI = MF->getFrameInfo(); 144 145 int64_t Offset = MFI.getObjectOffset(FI); 146 147 MachineMemOperand *MMO = MF->getMachineMemOperand( 148 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore, 4, 149 MFI.getObjectAlign(FI)); 150 151 if (ST.enableFlatScratch()) { 152 if (TII->isLegalFLATOffset(Offset, AMDGPUAS::PRIVATE_ADDRESS, true)) { 153 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::SCRATCH_STORE_DWORD_SADDR)) 154 .addReg(SpillReg, RegState::Kill) 155 .addReg(SPReg) 156 .addImm(Offset) 157 .addImm(0) // glc 158 .addImm(0) // slc 159 .addImm(0) // dlc 160 .addMemOperand(MMO); 161 return; 162 } 163 } else if (SIInstrInfo::isLegalMUBUFImmOffset(Offset)) { 164 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::BUFFER_STORE_DWORD_OFFSET)) 165 .addReg(SpillReg, RegState::Kill) 166 .addReg(ScratchRsrcReg) 167 .addReg(SPReg) 168 .addImm(Offset) 169 .addImm(0) // glc 170 .addImm(0) // slc 171 .addImm(0) // tfe 172 .addImm(0) // dlc 173 .addImm(0) // swz 174 .addMemOperand(MMO); 175 return; 176 } 177 178 // Don't clobber the TmpVGPR if we also need a scratch reg for the stack 179 // offset in the spill. 
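  // (Temporarily marking SpillReg live keeps findScratchNonCalleeSaveRegister
  // below from handing the same register back as the offset temporary; it is
  // removed from LiveRegs again once the store has been emitted.)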
180 LiveRegs.addReg(SpillReg); 181 182 if (ST.enableFlatScratch()) { 183 MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister( 184 MF->getRegInfo(), LiveRegs, AMDGPU::SReg_32_XM0RegClass); 185 186 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_ADD_U32), OffsetReg) 187 .addReg(SPReg) 188 .addImm(Offset); 189 190 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::SCRATCH_STORE_DWORD_SADDR)) 191 .addReg(SpillReg, RegState::Kill) 192 .addReg(OffsetReg, RegState::Kill) 193 .addImm(0) 194 .addImm(0) // glc 195 .addImm(0) // slc 196 .addImm(0) // dlc 197 .addMemOperand(MMO); 198 } else { 199 MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister( 200 MF->getRegInfo(), LiveRegs, AMDGPU::VGPR_32RegClass); 201 202 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::V_MOV_B32_e32), OffsetReg) 203 .addImm(Offset); 204 205 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::BUFFER_STORE_DWORD_OFFEN)) 206 .addReg(SpillReg, RegState::Kill) 207 .addReg(OffsetReg, RegState::Kill) 208 .addReg(ScratchRsrcReg) 209 .addReg(SPReg) 210 .addImm(0) 211 .addImm(0) // glc 212 .addImm(0) // slc 213 .addImm(0) // tfe 214 .addImm(0) // dlc 215 .addImm(0) // swz 216 .addMemOperand(MMO); 217 } 218 219 LiveRegs.removeReg(SpillReg); 220 } 221 222 static void buildEpilogReload(const GCNSubtarget &ST, LivePhysRegs &LiveRegs, 223 MachineBasicBlock &MBB, 224 MachineBasicBlock::iterator I, 225 const SIInstrInfo *TII, Register SpillReg, 226 Register ScratchRsrcReg, Register SPReg, int FI) { 227 MachineFunction *MF = MBB.getParent(); 228 MachineFrameInfo &MFI = MF->getFrameInfo(); 229 int64_t Offset = MFI.getObjectOffset(FI); 230 231 MachineMemOperand *MMO = MF->getMachineMemOperand( 232 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad, 4, 233 MFI.getObjectAlign(FI)); 234 235 if (ST.enableFlatScratch()) { 236 if (TII->isLegalFLATOffset(Offset, AMDGPUAS::PRIVATE_ADDRESS, true)) { 237 BuildMI(MBB, I, DebugLoc(), 238 TII->get(AMDGPU::SCRATCH_LOAD_DWORD_SADDR), SpillReg) 239 .addReg(SPReg) 240 .addImm(Offset) 241 .addImm(0) // glc 242 .addImm(0) // slc 243 .addImm(0) // dlc 244 .addMemOperand(MMO); 245 return; 246 } 247 MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister( 248 MF->getRegInfo(), LiveRegs, AMDGPU::SReg_32_XM0RegClass); 249 250 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_ADD_U32), OffsetReg) 251 .addReg(SPReg) 252 .addImm(Offset); 253 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::SCRATCH_LOAD_DWORD_SADDR), 254 SpillReg) 255 .addReg(OffsetReg, RegState::Kill) 256 .addImm(0) 257 .addImm(0) // glc 258 .addImm(0) // slc 259 .addImm(0) // dlc 260 .addMemOperand(MMO); 261 return; 262 } 263 264 if (SIInstrInfo::isLegalMUBUFImmOffset(Offset)) { 265 BuildMI(MBB, I, DebugLoc(), 266 TII->get(AMDGPU::BUFFER_LOAD_DWORD_OFFSET), SpillReg) 267 .addReg(ScratchRsrcReg) 268 .addReg(SPReg) 269 .addImm(Offset) 270 .addImm(0) // glc 271 .addImm(0) // slc 272 .addImm(0) // tfe 273 .addImm(0) // dlc 274 .addImm(0) // swz 275 .addMemOperand(MMO); 276 return; 277 } 278 279 MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister( 280 MF->getRegInfo(), LiveRegs, AMDGPU::VGPR_32RegClass); 281 282 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::V_MOV_B32_e32), OffsetReg) 283 .addImm(Offset); 284 285 BuildMI(MBB, I, DebugLoc(), 286 TII->get(AMDGPU::BUFFER_LOAD_DWORD_OFFEN), SpillReg) 287 .addReg(OffsetReg, RegState::Kill) 288 .addReg(ScratchRsrcReg) 289 .addReg(SPReg) 290 .addImm(0) 291 .addImm(0) // glc 292 .addImm(0) // slc 293 .addImm(0) // tfe 294 .addImm(0) // dlc 295 .addImm(0) // swz 296 .addMemOperand(MMO); 297 } 298 299 // Emit flat 
scratch setup code, assuming `MFI->hasFlatScratchInit()` 300 void SIFrameLowering::emitEntryFunctionFlatScratchInit( 301 MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I, 302 const DebugLoc &DL, Register ScratchWaveOffsetReg) const { 303 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 304 const SIInstrInfo *TII = ST.getInstrInfo(); 305 const SIRegisterInfo *TRI = &TII->getRegisterInfo(); 306 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 307 308 // We don't need this if we only have spills since there is no user facing 309 // scratch. 310 311 // TODO: If we know we don't have flat instructions earlier, we can omit 312 // this from the input registers. 313 // 314 // TODO: We only need to know if we access scratch space through a flat 315 // pointer. Because we only detect if flat instructions are used at all, 316 // this will be used more often than necessary on VI. 317 318 Register FlatScratchInitReg = 319 MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT); 320 assert(FlatScratchInitReg); 321 322 MachineRegisterInfo &MRI = MF.getRegInfo(); 323 MRI.addLiveIn(FlatScratchInitReg); 324 MBB.addLiveIn(FlatScratchInitReg); 325 326 Register FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0); 327 Register FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1); 328 329 // Do a 64-bit pointer add. 330 if (ST.flatScratchIsPointer()) { 331 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 332 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo) 333 .addReg(FlatScrInitLo) 334 .addReg(ScratchWaveOffsetReg); 335 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), FlatScrInitHi) 336 .addReg(FlatScrInitHi) 337 .addImm(0); 338 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32)). 339 addReg(FlatScrInitLo). 340 addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_LO | 341 (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_))); 342 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32)). 343 addReg(FlatScrInitHi). 344 addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_HI | 345 (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_))); 346 return; 347 } 348 349 // For GFX9. 350 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO) 351 .addReg(FlatScrInitLo) 352 .addReg(ScratchWaveOffsetReg); 353 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI) 354 .addReg(FlatScrInitHi) 355 .addImm(0); 356 357 return; 358 } 359 360 assert(ST.getGeneration() < AMDGPUSubtarget::GFX9); 361 362 // Copy the size in bytes. 363 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO) 364 .addReg(FlatScrInitHi, RegState::Kill); 365 366 // Add wave offset in bytes to private base offset. 367 // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init. 368 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo) 369 .addReg(FlatScrInitLo) 370 .addReg(ScratchWaveOffsetReg); 371 372 // Convert offset to 256-byte units. 373 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI) 374 .addReg(FlatScrInitLo, RegState::Kill) 375 .addImm(8); 376 } 377 378 // Note SGPRSpill stack IDs should only be used for SGPR spilling to VGPRs, not 379 // memory. They should have been removed by now. 380 static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) { 381 for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd(); 382 I != E; ++I) { 383 if (!MFI.isDeadObjectIndex(I)) 384 return false; 385 } 386 387 return true; 388 } 389 390 // Shift down registers reserved for the scratch RSRC. 
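// For example, a kernel that preloads ten SGPRs covers the first three aligned
// SGPR128 tuples ((10 + 3) / 4 == 3), so the search below starts at the fourth
// tuple and takes the first one that is allocatable, unused, and does not
// overlap the GIT pointer register.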
391 Register SIFrameLowering::getEntryFunctionReservedScratchRsrcReg( 392 MachineFunction &MF) const { 393 394 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 395 const SIInstrInfo *TII = ST.getInstrInfo(); 396 const SIRegisterInfo *TRI = &TII->getRegisterInfo(); 397 MachineRegisterInfo &MRI = MF.getRegInfo(); 398 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 399 400 assert(MFI->isEntryFunction()); 401 402 Register ScratchRsrcReg = MFI->getScratchRSrcReg(); 403 404 if (!ScratchRsrcReg || (!MRI.isPhysRegUsed(ScratchRsrcReg) && 405 allStackObjectsAreDead(MF.getFrameInfo()))) 406 return Register(); 407 408 if (ST.hasSGPRInitBug() || 409 ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF)) 410 return ScratchRsrcReg; 411 412 // We reserved the last registers for this. Shift it down to the end of those 413 // which were actually used. 414 // 415 // FIXME: It might be safer to use a pseudoregister before replacement. 416 417 // FIXME: We should be able to eliminate unused input registers. We only 418 // cannot do this for the resources required for scratch access. For now we 419 // skip over user SGPRs and may leave unused holes. 420 421 unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4; 422 ArrayRef<MCPhysReg> AllSGPR128s = TRI->getAllSGPR128(MF); 423 AllSGPR128s = AllSGPR128s.slice(std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded)); 424 425 // Skip the last N reserved elements because they should have already been 426 // reserved for VCC etc. 427 Register GITPtrLoReg = MFI->getGITPtrLoReg(MF); 428 for (MCPhysReg Reg : AllSGPR128s) { 429 // Pick the first unallocated one. Make sure we don't clobber the other 430 // reserved input we needed. Also for PAL, make sure we don't clobber 431 // the GIT pointer passed in SGPR0 or SGPR8. 432 if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) && 433 !TRI->isSubRegisterEq(Reg, GITPtrLoReg)) { 434 MRI.replaceRegWith(ScratchRsrcReg, Reg); 435 MFI->setScratchRSrcReg(Reg); 436 return Reg; 437 } 438 } 439 440 return ScratchRsrcReg; 441 } 442 443 static unsigned getScratchScaleFactor(const GCNSubtarget &ST) { 444 return ST.enableFlatScratch() ? 1 : ST.getWavefrontSize(); 445 } 446 447 void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF, 448 MachineBasicBlock &MBB) const { 449 assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported"); 450 451 // FIXME: If we only have SGPR spills, we won't actually be using scratch 452 // memory since these spill to VGPRs. We should be cleaning up these unused 453 // SGPR spill frame indices somewhere. 454 455 // FIXME: We still have implicit uses on SGPR spill instructions in case they 456 // need to spill to vector memory. It's likely that will not happen, but at 457 // this point it appears we need the setup. This part of the prolog should be 458 // emitted after frame indices are eliminated. 459 460 // FIXME: Remove all of the isPhysRegUsed checks 461 462 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 463 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 464 const SIInstrInfo *TII = ST.getInstrInfo(); 465 const SIRegisterInfo *TRI = &TII->getRegisterInfo(); 466 MachineRegisterInfo &MRI = MF.getRegInfo(); 467 const Function &F = MF.getFunction(); 468 469 assert(MFI->isEntryFunction()); 470 471 Register PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg( 472 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); 473 // FIXME: Hack to not crash in situations which emitted an error. 
  if (!PreloadedScratchWaveOffsetReg)
    return;

  // We need to do the replacement of the private segment buffer register even
  // if there are no stack objects. There could be stores to undef or a
  // constant without an associated object.
  //
  // This will return `Register()` in cases where there are no actual
  // uses of the SRSRC.
  Register ScratchRsrcReg;
  if (!ST.enableFlatScratch())
    ScratchRsrcReg = getEntryFunctionReservedScratchRsrcReg(MF);

  // Make the selected register live throughout the function.
  if (ScratchRsrcReg) {
    for (MachineBasicBlock &OtherBB : MF) {
      if (&OtherBB != &MBB) {
        OtherBB.addLiveIn(ScratchRsrcReg);
      }
    }
  }

  // Now that we have fixed the reserved SRSRC we need to locate the
  // (potentially) preloaded SRSRC.
  Register PreloadedScratchRsrcReg;
  if (ST.isAmdHsaOrMesa(F)) {
    PreloadedScratchRsrcReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
    if (ScratchRsrcReg && PreloadedScratchRsrcReg) {
      // We added live-ins during argument lowering, but since they were not
      // used they were deleted. We're adding the uses now, so add them back.
      MRI.addLiveIn(PreloadedScratchRsrcReg);
      MBB.addLiveIn(PreloadedScratchRsrcReg);
    }
  }

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // We found the SRSRC first because it needs four registers and has an
  // alignment requirement. If the SRSRC that we found overlaps the scratch
  // wave offset, which may be in a fixed SGPR or a free SGPR chosen by
  // SITargetLowering::allocateSystemSGPRs, COPY the scratch wave offset to a
  // free SGPR.
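  // (The copy below must be emitted before the SRSRC setup at the end of this
  // prologue overwrites the overlapping register.)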
520 Register ScratchWaveOffsetReg; 521 if (TRI->isSubRegisterEq(ScratchRsrcReg, PreloadedScratchWaveOffsetReg)) { 522 ArrayRef<MCPhysReg> AllSGPRs = TRI->getAllSGPR32(MF); 523 unsigned NumPreloaded = MFI->getNumPreloadedSGPRs(); 524 AllSGPRs = AllSGPRs.slice( 525 std::min(static_cast<unsigned>(AllSGPRs.size()), NumPreloaded)); 526 Register GITPtrLoReg = MFI->getGITPtrLoReg(MF); 527 for (MCPhysReg Reg : AllSGPRs) { 528 if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) && 529 !TRI->isSubRegisterEq(ScratchRsrcReg, Reg) && GITPtrLoReg != Reg) { 530 ScratchWaveOffsetReg = Reg; 531 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg) 532 .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill); 533 break; 534 } 535 } 536 } else { 537 ScratchWaveOffsetReg = PreloadedScratchWaveOffsetReg; 538 } 539 assert(ScratchWaveOffsetReg); 540 541 if (requiresStackPointerReference(MF)) { 542 Register SPReg = MFI->getStackPtrOffsetReg(); 543 assert(SPReg != AMDGPU::SP_REG); 544 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), SPReg) 545 .addImm(MF.getFrameInfo().getStackSize() * getScratchScaleFactor(ST)); 546 } 547 548 if (hasFP(MF)) { 549 Register FPReg = MFI->getFrameOffsetReg(); 550 assert(FPReg != AMDGPU::FP_REG); 551 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), FPReg).addImm(0); 552 } 553 554 if (MFI->hasFlatScratchInit() || ScratchRsrcReg) { 555 MRI.addLiveIn(PreloadedScratchWaveOffsetReg); 556 MBB.addLiveIn(PreloadedScratchWaveOffsetReg); 557 } 558 559 if (MFI->hasFlatScratchInit()) { 560 emitEntryFunctionFlatScratchInit(MF, MBB, I, DL, ScratchWaveOffsetReg); 561 } 562 563 if (ScratchRsrcReg) { 564 emitEntryFunctionScratchRsrcRegSetup(MF, MBB, I, DL, 565 PreloadedScratchRsrcReg, 566 ScratchRsrcReg, ScratchWaveOffsetReg); 567 } 568 } 569 570 // Emit scratch RSRC setup code, assuming `ScratchRsrcReg != AMDGPU::NoReg` 571 void SIFrameLowering::emitEntryFunctionScratchRsrcRegSetup( 572 MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I, 573 const DebugLoc &DL, Register PreloadedScratchRsrcReg, 574 Register ScratchRsrcReg, Register ScratchWaveOffsetReg) const { 575 576 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 577 const SIInstrInfo *TII = ST.getInstrInfo(); 578 const SIRegisterInfo *TRI = &TII->getRegisterInfo(); 579 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 580 const Function &Fn = MF.getFunction(); 581 582 if (ST.isAmdPalOS()) { 583 // The pointer to the GIT is formed from the offset passed in and either 584 // the amdgpu-git-ptr-high function attribute or the top part of the PC 585 Register RsrcLo = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0); 586 Register RsrcHi = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1); 587 Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1); 588 589 const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32); 590 591 if (MFI->getGITPtrHigh() != 0xffffffff) { 592 BuildMI(MBB, I, DL, SMovB32, RsrcHi) 593 .addImm(MFI->getGITPtrHigh()) 594 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 595 } else { 596 const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64); 597 BuildMI(MBB, I, DL, GetPC64, Rsrc01); 598 } 599 Register GitPtrLo = MFI->getGITPtrLoReg(MF); 600 MF.getRegInfo().addLiveIn(GitPtrLo); 601 MBB.addLiveIn(GitPtrLo); 602 BuildMI(MBB, I, DL, SMovB32, RsrcLo) 603 .addReg(GitPtrLo) 604 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 605 606 // We now have the GIT ptr - now get the scratch descriptor from the entry 607 // at offset 0 (or offset 16 for a compute shader). 
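    // (The descriptor itself is a 128-bit buffer resource, which is why a
    // 16-byte s_load_dwordx4 is used below.)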
608 MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); 609 const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM); 610 auto MMO = MF.getMachineMemOperand(PtrInfo, 611 MachineMemOperand::MOLoad | 612 MachineMemOperand::MOInvariant | 613 MachineMemOperand::MODereferenceable, 614 16, Align(4)); 615 unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0; 616 const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>(); 617 unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset); 618 BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg) 619 .addReg(Rsrc01) 620 .addImm(EncodedOffset) // offset 621 .addImm(0) // glc 622 .addImm(0) // dlc 623 .addReg(ScratchRsrcReg, RegState::ImplicitDefine) 624 .addMemOperand(MMO); 625 } else if (ST.isMesaGfxShader(Fn) || !PreloadedScratchRsrcReg) { 626 assert(!ST.isAmdHsaOrMesa(Fn)); 627 const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32); 628 629 Register Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2); 630 Register Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3); 631 632 // Use relocations to get the pointer, and setup the other bits manually. 633 uint64_t Rsrc23 = TII->getScratchRsrcWords23(); 634 635 if (MFI->hasImplicitBufferPtr()) { 636 Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1); 637 638 if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) { 639 const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64); 640 641 BuildMI(MBB, I, DL, Mov64, Rsrc01) 642 .addReg(MFI->getImplicitBufferPtrUserSGPR()) 643 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 644 } else { 645 const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM); 646 647 MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); 648 auto MMO = MF.getMachineMemOperand( 649 PtrInfo, 650 MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | 651 MachineMemOperand::MODereferenceable, 652 8, Align(4)); 653 BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01) 654 .addReg(MFI->getImplicitBufferPtrUserSGPR()) 655 .addImm(0) // offset 656 .addImm(0) // glc 657 .addImm(0) // dlc 658 .addMemOperand(MMO) 659 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 660 661 MF.getRegInfo().addLiveIn(MFI->getImplicitBufferPtrUserSGPR()); 662 MBB.addLiveIn(MFI->getImplicitBufferPtrUserSGPR()); 663 } 664 } else { 665 Register Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0); 666 Register Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1); 667 668 BuildMI(MBB, I, DL, SMovB32, Rsrc0) 669 .addExternalSymbol("SCRATCH_RSRC_DWORD0") 670 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 671 672 BuildMI(MBB, I, DL, SMovB32, Rsrc1) 673 .addExternalSymbol("SCRATCH_RSRC_DWORD1") 674 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 675 676 } 677 678 BuildMI(MBB, I, DL, SMovB32, Rsrc2) 679 .addImm(Rsrc23 & 0xffffffff) 680 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 681 682 BuildMI(MBB, I, DL, SMovB32, Rsrc3) 683 .addImm(Rsrc23 >> 32) 684 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 685 } else if (ST.isAmdHsaOrMesa(Fn)) { 686 assert(PreloadedScratchRsrcReg); 687 688 if (ScratchRsrcReg != PreloadedScratchRsrcReg) { 689 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg) 690 .addReg(PreloadedScratchRsrcReg, RegState::Kill); 691 } 692 } 693 694 // Add the scratch wave offset into the scratch RSRC. 695 // 696 // We only want to update the first 48 bits, which is the base address 697 // pointer, without touching the adjacent 16 bits of flags. 
We know this add 698 // cannot carry-out from bit 47, otherwise the scratch allocation would be 699 // impossible to fit in the 48-bit global address space. 700 // 701 // TODO: Evaluate if it is better to just construct an SRD using the flat 702 // scratch init and some constants rather than update the one we are passed. 703 Register ScratchRsrcSub0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0); 704 Register ScratchRsrcSub1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1); 705 706 // We cannot Kill ScratchWaveOffsetReg here because we allow it to be used in 707 // the kernel body via inreg arguments. 708 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), ScratchRsrcSub0) 709 .addReg(ScratchRsrcSub0) 710 .addReg(ScratchWaveOffsetReg) 711 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 712 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), ScratchRsrcSub1) 713 .addReg(ScratchRsrcSub1) 714 .addImm(0) 715 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 716 } 717 718 bool SIFrameLowering::isSupportedStackID(TargetStackID::Value ID) const { 719 switch (ID) { 720 case TargetStackID::Default: 721 case TargetStackID::NoAlloc: 722 case TargetStackID::SGPRSpill: 723 return true; 724 case TargetStackID::SVEVector: 725 return false; 726 } 727 llvm_unreachable("Invalid TargetStackID::Value"); 728 } 729 730 // Activate all lanes, returns saved exec. 731 static Register buildScratchExecCopy(LivePhysRegs &LiveRegs, 732 MachineFunction &MF, 733 MachineBasicBlock &MBB, 734 MachineBasicBlock::iterator MBBI, 735 bool IsProlog) { 736 Register ScratchExecCopy; 737 MachineRegisterInfo &MRI = MF.getRegInfo(); 738 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 739 const SIInstrInfo *TII = ST.getInstrInfo(); 740 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 741 SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 742 DebugLoc DL; 743 744 if (LiveRegs.empty()) { 745 if (IsProlog) { 746 LiveRegs.init(TRI); 747 LiveRegs.addLiveIns(MBB); 748 if (FuncInfo->SGPRForFPSaveRestoreCopy) 749 LiveRegs.removeReg(FuncInfo->SGPRForFPSaveRestoreCopy); 750 751 if (FuncInfo->SGPRForBPSaveRestoreCopy) 752 LiveRegs.removeReg(FuncInfo->SGPRForBPSaveRestoreCopy); 753 } else { 754 // In epilog. 755 LiveRegs.init(*ST.getRegisterInfo()); 756 LiveRegs.addLiveOuts(MBB); 757 LiveRegs.stepBackward(*MBBI); 758 } 759 } 760 761 ScratchExecCopy = findScratchNonCalleeSaveRegister( 762 MRI, LiveRegs, *TRI.getWaveMaskRegClass()); 763 764 if (!IsProlog) 765 LiveRegs.removeReg(ScratchExecCopy); 766 767 const unsigned OrSaveExec = 768 ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64; 769 BuildMI(MBB, MBBI, DL, TII->get(OrSaveExec), ScratchExecCopy).addImm(-1); 770 771 return ScratchExecCopy; 772 } 773 774 void SIFrameLowering::emitPrologue(MachineFunction &MF, 775 MachineBasicBlock &MBB) const { 776 SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 777 if (FuncInfo->isEntryFunction()) { 778 emitEntryFunctionPrologue(MF, MBB); 779 return; 780 } 781 782 const MachineFrameInfo &MFI = MF.getFrameInfo(); 783 MachineRegisterInfo &MRI = MF.getRegInfo(); 784 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 785 const SIInstrInfo *TII = ST.getInstrInfo(); 786 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 787 788 Register StackPtrReg = FuncInfo->getStackPtrOffsetReg(); 789 Register FramePtrReg = FuncInfo->getFrameOffsetReg(); 790 Register BasePtrReg = 791 TRI.hasBasePointer(MF) ? 
TRI.getBaseRegister() : Register(); 792 LivePhysRegs LiveRegs; 793 794 MachineBasicBlock::iterator MBBI = MBB.begin(); 795 DebugLoc DL; 796 797 bool HasFP = false; 798 bool HasBP = false; 799 uint32_t NumBytes = MFI.getStackSize(); 800 uint32_t RoundedSize = NumBytes; 801 // To avoid clobbering VGPRs in lanes that weren't active on function entry, 802 // turn on all lanes before doing the spill to memory. 803 Register ScratchExecCopy; 804 805 bool HasFPSaveIndex = FuncInfo->FramePointerSaveIndex.hasValue(); 806 bool SpillFPToMemory = false; 807 // A StackID of SGPRSpill implies that this is a spill from SGPR to VGPR. 808 // Otherwise we are spilling the FP to memory. 809 if (HasFPSaveIndex) { 810 SpillFPToMemory = MFI.getStackID(*FuncInfo->FramePointerSaveIndex) != 811 TargetStackID::SGPRSpill; 812 } 813 814 bool HasBPSaveIndex = FuncInfo->BasePointerSaveIndex.hasValue(); 815 bool SpillBPToMemory = false; 816 // A StackID of SGPRSpill implies that this is a spill from SGPR to VGPR. 817 // Otherwise we are spilling the BP to memory. 818 if (HasBPSaveIndex) { 819 SpillBPToMemory = MFI.getStackID(*FuncInfo->BasePointerSaveIndex) != 820 TargetStackID::SGPRSpill; 821 } 822 823 // Emit the copy if we need an FP, and are using a free SGPR to save it. 824 if (FuncInfo->SGPRForFPSaveRestoreCopy) { 825 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FuncInfo->SGPRForFPSaveRestoreCopy) 826 .addReg(FramePtrReg) 827 .setMIFlag(MachineInstr::FrameSetup); 828 } 829 830 // Emit the copy if we need a BP, and are using a free SGPR to save it. 831 if (FuncInfo->SGPRForBPSaveRestoreCopy) { 832 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), 833 FuncInfo->SGPRForBPSaveRestoreCopy) 834 .addReg(BasePtrReg) 835 .setMIFlag(MachineInstr::FrameSetup); 836 } 837 838 // If a copy has been emitted for FP and/or BP, Make the SGPRs 839 // used in the copy instructions live throughout the function. 
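  // Otherwise later passes could treat those SGPRs as free and reuse them,
  // clobbering the saved FP/BP before the epilogue copies it back.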
840 SmallVector<MCPhysReg, 2> TempSGPRs; 841 if (FuncInfo->SGPRForFPSaveRestoreCopy) 842 TempSGPRs.push_back(FuncInfo->SGPRForFPSaveRestoreCopy); 843 844 if (FuncInfo->SGPRForBPSaveRestoreCopy) 845 TempSGPRs.push_back(FuncInfo->SGPRForBPSaveRestoreCopy); 846 847 if (!TempSGPRs.empty()) { 848 for (MachineBasicBlock &MBB : MF) { 849 for (MCPhysReg Reg : TempSGPRs) 850 MBB.addLiveIn(Reg); 851 852 MBB.sortUniqueLiveIns(); 853 } 854 } 855 856 for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg 857 : FuncInfo->getSGPRSpillVGPRs()) { 858 if (!Reg.FI.hasValue()) 859 continue; 860 861 if (!ScratchExecCopy) 862 ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, true); 863 864 buildPrologSpill(ST, LiveRegs, MBB, MBBI, TII, Reg.VGPR, 865 FuncInfo->getScratchRSrcReg(), 866 StackPtrReg, 867 Reg.FI.getValue()); 868 } 869 870 if (HasFPSaveIndex && SpillFPToMemory) { 871 assert(!MFI.isDeadObjectIndex(FuncInfo->FramePointerSaveIndex.getValue())); 872 873 if (!ScratchExecCopy) 874 ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, true); 875 876 MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister( 877 MRI, LiveRegs, AMDGPU::VGPR_32RegClass); 878 879 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR) 880 .addReg(FramePtrReg); 881 882 buildPrologSpill(ST, LiveRegs, MBB, MBBI, TII, TmpVGPR, 883 FuncInfo->getScratchRSrcReg(), StackPtrReg, 884 FuncInfo->FramePointerSaveIndex.getValue()); 885 } 886 887 if (HasBPSaveIndex && SpillBPToMemory) { 888 assert(!MFI.isDeadObjectIndex(*FuncInfo->BasePointerSaveIndex)); 889 890 if (!ScratchExecCopy) 891 ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, true); 892 893 MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister( 894 MRI, LiveRegs, AMDGPU::VGPR_32RegClass); 895 896 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR) 897 .addReg(BasePtrReg); 898 899 buildPrologSpill(ST, LiveRegs, MBB, MBBI, TII, TmpVGPR, 900 FuncInfo->getScratchRSrcReg(), StackPtrReg, 901 *FuncInfo->BasePointerSaveIndex); 902 } 903 904 if (ScratchExecCopy) { 905 // FIXME: Split block and make terminator. 906 unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 907 MCRegister Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 908 BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec) 909 .addReg(ScratchExecCopy, RegState::Kill); 910 LiveRegs.addReg(ScratchExecCopy); 911 } 912 913 // In this case, spill the FP to a reserved VGPR. 914 if (HasFPSaveIndex && !SpillFPToMemory) { 915 const int FI = FuncInfo->FramePointerSaveIndex.getValue(); 916 assert(!MFI.isDeadObjectIndex(FI)); 917 918 assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill); 919 ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill = 920 FuncInfo->getSGPRToVGPRSpills(FI); 921 assert(Spill.size() == 1); 922 923 // Save FP before setting it up. 924 // FIXME: This should respect spillSGPRToVGPR; 925 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill[0].VGPR) 926 .addReg(FramePtrReg) 927 .addImm(Spill[0].Lane) 928 .addReg(Spill[0].VGPR, RegState::Undef); 929 } 930 931 // In this case, spill the BP to a reserved VGPR. 932 if (HasBPSaveIndex && !SpillBPToMemory) { 933 const int BasePtrFI = *FuncInfo->BasePointerSaveIndex; 934 assert(!MFI.isDeadObjectIndex(BasePtrFI)); 935 936 assert(MFI.getStackID(BasePtrFI) == TargetStackID::SGPRSpill); 937 ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill = 938 FuncInfo->getSGPRToVGPRSpills(BasePtrFI); 939 assert(Spill.size() == 1); 940 941 // Save BP before setting it up. 
942 // FIXME: This should respect spillSGPRToVGPR; 943 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill[0].VGPR) 944 .addReg(BasePtrReg) 945 .addImm(Spill[0].Lane) 946 .addReg(Spill[0].VGPR, RegState::Undef); 947 } 948 949 if (TRI.needsStackRealignment(MF)) { 950 HasFP = true; 951 const unsigned Alignment = MFI.getMaxAlign().value(); 952 953 RoundedSize += Alignment; 954 if (LiveRegs.empty()) { 955 LiveRegs.init(TRI); 956 LiveRegs.addLiveIns(MBB); 957 LiveRegs.addReg(FuncInfo->SGPRForFPSaveRestoreCopy); 958 LiveRegs.addReg(FuncInfo->SGPRForBPSaveRestoreCopy); 959 } 960 961 Register ScratchSPReg = findScratchNonCalleeSaveRegister( 962 MRI, LiveRegs, AMDGPU::SReg_32_XM0RegClass); 963 assert(ScratchSPReg && ScratchSPReg != FuncInfo->SGPRForFPSaveRestoreCopy && 964 ScratchSPReg != FuncInfo->SGPRForBPSaveRestoreCopy); 965 966 // s_add_u32 tmp_reg, s32, NumBytes 967 // s_and_b32 s32, tmp_reg, 0b111...0000 968 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), ScratchSPReg) 969 .addReg(StackPtrReg) 970 .addImm((Alignment - 1) * getScratchScaleFactor(ST)) 971 .setMIFlag(MachineInstr::FrameSetup); 972 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_AND_B32), FramePtrReg) 973 .addReg(ScratchSPReg, RegState::Kill) 974 .addImm(-Alignment * getScratchScaleFactor(ST)) 975 .setMIFlag(MachineInstr::FrameSetup); 976 FuncInfo->setIsStackRealigned(true); 977 } else if ((HasFP = hasFP(MF))) { 978 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg) 979 .addReg(StackPtrReg) 980 .setMIFlag(MachineInstr::FrameSetup); 981 } 982 983 // If we need a base pointer, set it up here. It's whatever the value of 984 // the stack pointer is at this point. Any variable size objects will be 985 // allocated after this, so we can still use the base pointer to reference 986 // the incoming arguments. 
987 if ((HasBP = TRI.hasBasePointer(MF))) { 988 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), BasePtrReg) 989 .addReg(StackPtrReg) 990 .setMIFlag(MachineInstr::FrameSetup); 991 } 992 993 if (HasFP && RoundedSize != 0) { 994 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), StackPtrReg) 995 .addReg(StackPtrReg) 996 .addImm(RoundedSize * getScratchScaleFactor(ST)) 997 .setMIFlag(MachineInstr::FrameSetup); 998 } 999 1000 assert((!HasFP || (FuncInfo->SGPRForFPSaveRestoreCopy || 1001 FuncInfo->FramePointerSaveIndex)) && 1002 "Needed to save FP but didn't save it anywhere"); 1003 1004 assert((HasFP || (!FuncInfo->SGPRForFPSaveRestoreCopy && 1005 !FuncInfo->FramePointerSaveIndex)) && 1006 "Saved FP but didn't need it"); 1007 1008 assert((!HasBP || (FuncInfo->SGPRForBPSaveRestoreCopy || 1009 FuncInfo->BasePointerSaveIndex)) && 1010 "Needed to save BP but didn't save it anywhere"); 1011 1012 assert((HasBP || (!FuncInfo->SGPRForBPSaveRestoreCopy && 1013 !FuncInfo->BasePointerSaveIndex)) && 1014 "Saved BP but didn't need it"); 1015 } 1016 1017 void SIFrameLowering::emitEpilogue(MachineFunction &MF, 1018 MachineBasicBlock &MBB) const { 1019 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 1020 if (FuncInfo->isEntryFunction()) 1021 return; 1022 1023 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 1024 const SIInstrInfo *TII = ST.getInstrInfo(); 1025 MachineRegisterInfo &MRI = MF.getRegInfo(); 1026 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 1027 MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator(); 1028 LivePhysRegs LiveRegs; 1029 DebugLoc DL; 1030 1031 const MachineFrameInfo &MFI = MF.getFrameInfo(); 1032 uint32_t NumBytes = MFI.getStackSize(); 1033 uint32_t RoundedSize = FuncInfo->isStackRealigned() 1034 ? NumBytes + MFI.getMaxAlign().value() 1035 : NumBytes; 1036 const Register StackPtrReg = FuncInfo->getStackPtrOffsetReg(); 1037 const Register FramePtrReg = FuncInfo->getFrameOffsetReg(); 1038 const Register BasePtrReg = 1039 TRI.hasBasePointer(MF) ? 
TRI.getBaseRegister() : Register(); 1040 1041 bool HasFPSaveIndex = FuncInfo->FramePointerSaveIndex.hasValue(); 1042 bool SpillFPToMemory = false; 1043 if (HasFPSaveIndex) { 1044 SpillFPToMemory = MFI.getStackID(*FuncInfo->FramePointerSaveIndex) != 1045 TargetStackID::SGPRSpill; 1046 } 1047 1048 bool HasBPSaveIndex = FuncInfo->BasePointerSaveIndex.hasValue(); 1049 bool SpillBPToMemory = false; 1050 if (HasBPSaveIndex) { 1051 SpillBPToMemory = MFI.getStackID(*FuncInfo->BasePointerSaveIndex) != 1052 TargetStackID::SGPRSpill; 1053 } 1054 1055 if (RoundedSize != 0 && hasFP(MF)) { 1056 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_SUB_U32), StackPtrReg) 1057 .addReg(StackPtrReg) 1058 .addImm(RoundedSize * getScratchScaleFactor(ST)) 1059 .setMIFlag(MachineInstr::FrameDestroy); 1060 } 1061 1062 if (FuncInfo->SGPRForFPSaveRestoreCopy) { 1063 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg) 1064 .addReg(FuncInfo->SGPRForFPSaveRestoreCopy) 1065 .setMIFlag(MachineInstr::FrameSetup); 1066 } 1067 1068 if (FuncInfo->SGPRForBPSaveRestoreCopy) { 1069 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), BasePtrReg) 1070 .addReg(FuncInfo->SGPRForBPSaveRestoreCopy) 1071 .setMIFlag(MachineInstr::FrameSetup); 1072 } 1073 1074 Register ScratchExecCopy; 1075 if (HasFPSaveIndex) { 1076 const int FI = FuncInfo->FramePointerSaveIndex.getValue(); 1077 assert(!MFI.isDeadObjectIndex(FI)); 1078 if (SpillFPToMemory) { 1079 if (!ScratchExecCopy) 1080 ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, false); 1081 1082 MCPhysReg TempVGPR = findScratchNonCalleeSaveRegister( 1083 MRI, LiveRegs, AMDGPU::VGPR_32RegClass); 1084 buildEpilogReload(ST, LiveRegs, MBB, MBBI, TII, TempVGPR, 1085 FuncInfo->getScratchRSrcReg(), StackPtrReg, FI); 1086 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), FramePtrReg) 1087 .addReg(TempVGPR, RegState::Kill); 1088 } else { 1089 // Reload from VGPR spill. 1090 assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill); 1091 ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill = 1092 FuncInfo->getSGPRToVGPRSpills(FI); 1093 assert(Spill.size() == 1); 1094 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READLANE_B32), FramePtrReg) 1095 .addReg(Spill[0].VGPR) 1096 .addImm(Spill[0].Lane); 1097 } 1098 } 1099 1100 if (HasBPSaveIndex) { 1101 const int BasePtrFI = *FuncInfo->BasePointerSaveIndex; 1102 assert(!MFI.isDeadObjectIndex(BasePtrFI)); 1103 if (SpillBPToMemory) { 1104 if (!ScratchExecCopy) 1105 ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, false); 1106 1107 MCPhysReg TempVGPR = findScratchNonCalleeSaveRegister( 1108 MRI, LiveRegs, AMDGPU::VGPR_32RegClass); 1109 buildEpilogReload(ST, LiveRegs, MBB, MBBI, TII, TempVGPR, 1110 FuncInfo->getScratchRSrcReg(), StackPtrReg, BasePtrFI); 1111 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), BasePtrReg) 1112 .addReg(TempVGPR, RegState::Kill); 1113 } else { 1114 // Reload from VGPR spill. 
1115 assert(MFI.getStackID(BasePtrFI) == TargetStackID::SGPRSpill); 1116 ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill = 1117 FuncInfo->getSGPRToVGPRSpills(BasePtrFI); 1118 assert(Spill.size() == 1); 1119 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READLANE_B32), BasePtrReg) 1120 .addReg(Spill[0].VGPR) 1121 .addImm(Spill[0].Lane); 1122 } 1123 } 1124 1125 for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg : 1126 FuncInfo->getSGPRSpillVGPRs()) { 1127 if (!Reg.FI.hasValue()) 1128 continue; 1129 1130 if (!ScratchExecCopy) 1131 ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, false); 1132 1133 buildEpilogReload(ST, LiveRegs, MBB, MBBI, TII, Reg.VGPR, 1134 FuncInfo->getScratchRSrcReg(), StackPtrReg, 1135 Reg.FI.getValue()); 1136 } 1137 1138 if (ScratchExecCopy) { 1139 // FIXME: Split block and make terminator. 1140 unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 1141 MCRegister Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 1142 BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec) 1143 .addReg(ScratchExecCopy, RegState::Kill); 1144 } 1145 } 1146 1147 #ifndef NDEBUG 1148 static bool allSGPRSpillsAreDead(const MachineFunction &MF) { 1149 const MachineFrameInfo &MFI = MF.getFrameInfo(); 1150 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 1151 for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd(); 1152 I != E; ++I) { 1153 if (!MFI.isDeadObjectIndex(I) && 1154 MFI.getStackID(I) == TargetStackID::SGPRSpill && 1155 (I != FuncInfo->FramePointerSaveIndex && 1156 I != FuncInfo->BasePointerSaveIndex)) { 1157 return false; 1158 } 1159 } 1160 1161 return true; 1162 } 1163 #endif 1164 1165 StackOffset SIFrameLowering::getFrameIndexReference(const MachineFunction &MF, 1166 int FI, 1167 Register &FrameReg) const { 1168 const SIRegisterInfo *RI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo(); 1169 1170 FrameReg = RI->getFrameRegister(MF); 1171 return StackOffset::getFixed(MF.getFrameInfo().getObjectOffset(FI)); 1172 } 1173 1174 void SIFrameLowering::processFunctionBeforeFrameFinalized( 1175 MachineFunction &MF, 1176 RegScavenger *RS) const { 1177 MachineFrameInfo &MFI = MF.getFrameInfo(); 1178 1179 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 1180 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 1181 SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 1182 1183 FuncInfo->removeDeadFrameIndices(MFI); 1184 assert(allSGPRSpillsAreDead(MF) && 1185 "SGPR spill should have been removed in SILowerSGPRSpills"); 1186 1187 // FIXME: The other checks should be redundant with allStackObjectsAreDead, 1188 // but currently hasNonSpillStackObjects is set only from source 1189 // allocas. Stack temps produced from legalization are not counted currently. 1190 if (!allStackObjectsAreDead(MFI)) { 1191 assert(RS && "RegScavenger required if spilling"); 1192 1193 if (FuncInfo->isEntryFunction()) { 1194 int ScavengeFI = MFI.CreateFixedObject( 1195 TRI->getSpillSize(AMDGPU::SGPR_32RegClass), 0, false); 1196 RS->addScavengingFrameIndex(ScavengeFI); 1197 } else { 1198 int ScavengeFI = MFI.CreateStackObject( 1199 TRI->getSpillSize(AMDGPU::SGPR_32RegClass), 1200 TRI->getSpillAlign(AMDGPU::SGPR_32RegClass), false); 1201 RS->addScavengingFrameIndex(ScavengeFI); 1202 } 1203 } 1204 } 1205 1206 // Only report VGPRs to generic code. 
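// SGPR callee saves are handled separately by the target (see
// determineCalleeSavesSGPR below), so only VGPR CSRs are meaningful to the
// generic spill/restore insertion here.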
1207 void SIFrameLowering::determineCalleeSaves(MachineFunction &MF, 1208 BitVector &SavedVGPRs, 1209 RegScavenger *RS) const { 1210 TargetFrameLowering::determineCalleeSaves(MF, SavedVGPRs, RS); 1211 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1212 if (MFI->isEntryFunction()) 1213 return; 1214 1215 MachineFrameInfo &FrameInfo = MF.getFrameInfo(); 1216 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 1217 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 1218 1219 // Ignore the SGPRs the default implementation found. 1220 SavedVGPRs.clearBitsNotInMask(TRI->getAllVGPRRegMask()); 1221 1222 // hasFP only knows about stack objects that already exist. We're now 1223 // determining the stack slots that will be created, so we have to predict 1224 // them. Stack objects force FP usage with calls. 1225 // 1226 // Note a new VGPR CSR may be introduced if one is used for the spill, but we 1227 // don't want to report it here. 1228 // 1229 // FIXME: Is this really hasReservedCallFrame? 1230 const bool WillHaveFP = 1231 FrameInfo.hasCalls() && 1232 (SavedVGPRs.any() || !allStackObjectsAreDead(FrameInfo)); 1233 1234 // VGPRs used for SGPR spilling need to be specially inserted in the prolog, 1235 // so don't allow the default insertion to handle them. 1236 for (auto SSpill : MFI->getSGPRSpillVGPRs()) 1237 SavedVGPRs.reset(SSpill.VGPR); 1238 1239 LivePhysRegs LiveRegs; 1240 LiveRegs.init(*TRI); 1241 1242 if (WillHaveFP || hasFP(MF)) { 1243 getVGPRSpillLaneOrTempRegister(MF, LiveRegs, MFI->SGPRForFPSaveRestoreCopy, 1244 MFI->FramePointerSaveIndex, true); 1245 } 1246 1247 if (TRI->hasBasePointer(MF)) { 1248 if (MFI->SGPRForFPSaveRestoreCopy) 1249 LiveRegs.addReg(MFI->SGPRForFPSaveRestoreCopy); 1250 getVGPRSpillLaneOrTempRegister(MF, LiveRegs, MFI->SGPRForBPSaveRestoreCopy, 1251 MFI->BasePointerSaveIndex, false); 1252 } 1253 } 1254 1255 void SIFrameLowering::determineCalleeSavesSGPR(MachineFunction &MF, 1256 BitVector &SavedRegs, 1257 RegScavenger *RS) const { 1258 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); 1259 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1260 if (MFI->isEntryFunction()) 1261 return; 1262 1263 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 1264 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 1265 1266 // The SP is specifically managed and we don't want extra spills of it. 1267 SavedRegs.reset(MFI->getStackPtrOffsetReg()); 1268 SavedRegs.clearBitsInMask(TRI->getAllVGPRRegMask()); 1269 } 1270 1271 bool SIFrameLowering::assignCalleeSavedSpillSlots( 1272 MachineFunction &MF, const TargetRegisterInfo *TRI, 1273 std::vector<CalleeSavedInfo> &CSI) const { 1274 if (CSI.empty()) 1275 return true; // Early exit if no callee saved registers are modified! 
1276 1277 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 1278 if (!FuncInfo->SGPRForFPSaveRestoreCopy && 1279 !FuncInfo->SGPRForBPSaveRestoreCopy) 1280 return false; 1281 1282 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 1283 const SIRegisterInfo *RI = ST.getRegisterInfo(); 1284 Register FramePtrReg = FuncInfo->getFrameOffsetReg(); 1285 Register BasePtrReg = RI->getBaseRegister(); 1286 unsigned NumModifiedRegs = 0; 1287 1288 if (FuncInfo->SGPRForFPSaveRestoreCopy) 1289 NumModifiedRegs++; 1290 if (FuncInfo->SGPRForBPSaveRestoreCopy) 1291 NumModifiedRegs++; 1292 1293 for (auto &CS : CSI) { 1294 if (CS.getReg() == FramePtrReg && FuncInfo->SGPRForFPSaveRestoreCopy) { 1295 CS.setDstReg(FuncInfo->SGPRForFPSaveRestoreCopy); 1296 if (--NumModifiedRegs) 1297 break; 1298 } else if (CS.getReg() == BasePtrReg && 1299 FuncInfo->SGPRForBPSaveRestoreCopy) { 1300 CS.setDstReg(FuncInfo->SGPRForBPSaveRestoreCopy); 1301 if (--NumModifiedRegs) 1302 break; 1303 } 1304 } 1305 1306 return false; 1307 } 1308 1309 MachineBasicBlock::iterator SIFrameLowering::eliminateCallFramePseudoInstr( 1310 MachineFunction &MF, 1311 MachineBasicBlock &MBB, 1312 MachineBasicBlock::iterator I) const { 1313 int64_t Amount = I->getOperand(0).getImm(); 1314 if (Amount == 0) 1315 return MBB.erase(I); 1316 1317 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 1318 const SIInstrInfo *TII = ST.getInstrInfo(); 1319 const DebugLoc &DL = I->getDebugLoc(); 1320 unsigned Opc = I->getOpcode(); 1321 bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode(); 1322 uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0; 1323 1324 if (!hasReservedCallFrame(MF)) { 1325 Amount = alignTo(Amount, getStackAlign()); 1326 assert(isUInt<32>(Amount) && "exceeded stack address space size"); 1327 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1328 Register SPReg = MFI->getStackPtrOffsetReg(); 1329 1330 unsigned Op = IsDestroy ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32; 1331 BuildMI(MBB, I, DL, TII->get(Op), SPReg) 1332 .addReg(SPReg) 1333 .addImm(Amount * getScratchScaleFactor(ST)); 1334 } else if (CalleePopAmount != 0) { 1335 llvm_unreachable("is this used?"); 1336 } 1337 1338 return MBB.erase(I); 1339 } 1340 1341 /// Returns true if the frame will require a reference to the stack pointer. 1342 /// 1343 /// This is the set of conditions common to setting up the stack pointer in a 1344 /// kernel, and for using a frame pointer in a callable function. 1345 /// 1346 /// FIXME: Should also check hasOpaqueSPAdjustment and if any inline asm 1347 /// references SP. 1348 static bool frameTriviallyRequiresSP(const MachineFrameInfo &MFI) { 1349 return MFI.hasVarSizedObjects() || MFI.hasStackMap() || MFI.hasPatchPoint(); 1350 } 1351 1352 // The FP for kernels is always known 0, so we never really need to setup an 1353 // explicit register for it. However, DisableFramePointerElim will force us to 1354 // use a register for it. 1355 bool SIFrameLowering::hasFP(const MachineFunction &MF) const { 1356 const MachineFrameInfo &MFI = MF.getFrameInfo(); 1357 1358 // For entry functions we can use an immediate offset in most cases, so the 1359 // presence of calls doesn't imply we need a distinct frame pointer. 1360 if (MFI.hasCalls() && 1361 !MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) { 1362 // All offsets are unsigned, so need to be addressed in the same direction 1363 // as stack growth. 
1364 1365 // FIXME: This function is pretty broken, since it can be called before the 1366 // frame layout is determined or CSR spills are inserted. 1367 return MFI.getStackSize() != 0; 1368 } 1369 1370 return frameTriviallyRequiresSP(MFI) || MFI.isFrameAddressTaken() || 1371 MF.getSubtarget<GCNSubtarget>().getRegisterInfo()->needsStackRealignment(MF) || 1372 MF.getTarget().Options.DisableFramePointerElim(MF); 1373 } 1374 1375 // This is essentially a reduced version of hasFP for entry functions. Since the 1376 // stack pointer is known 0 on entry to kernels, we never really need an FP 1377 // register. We may need to initialize the stack pointer depending on the frame 1378 // properties, which logically overlaps many of the cases where an ordinary 1379 // function would require an FP. 1380 bool SIFrameLowering::requiresStackPointerReference( 1381 const MachineFunction &MF) const { 1382 // Callable functions always require a stack pointer reference. 1383 assert(MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction() && 1384 "only expected to call this for entry points"); 1385 1386 const MachineFrameInfo &MFI = MF.getFrameInfo(); 1387 1388 // Entry points ordinarily don't need to initialize SP. We have to set it up 1389 // for callees if there are any. Also note tail calls are impossible/don't 1390 // make any sense for kernels. 1391 if (MFI.hasCalls()) 1392 return true; 1393 1394 // We still need to initialize the SP if we're doing anything weird that 1395 // references the SP, like variable sized stack objects. 1396 return frameTriviallyRequiresSP(MFI); 1397 } 1398