//===----------------------- SIFrameLowering.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "frame-info"

// Find a scratch register that we can use in the prologue. We avoid using
// callee-save registers since they may appear to be free when this is called
// from canUseAsPrologue (during shrink wrapping), but then no longer be free
// when this is called from emitPrologue.
static MCRegister findScratchNonCalleeSaveRegister(MachineRegisterInfo &MRI,
                                                   LivePhysRegs &LiveRegs,
                                                   const TargetRegisterClass &RC,
                                                   bool Unused = false) {
  // Mark callee saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

  if (Unused) {
    // We are looking for a register that can be used throughout the entire
    // function, so any use is unacceptable.
    for (MCRegister Reg : RC) {
      if (!MRI.isPhysRegUsed(Reg) && LiveRegs.available(MRI, Reg))
        return Reg;
    }
  } else {
    for (MCRegister Reg : RC) {
      if (LiveRegs.available(MRI, Reg))
        return Reg;
    }
  }

  return MCRegister();
}

static void getVGPRSpillLaneOrTempRegister(MachineFunction &MF,
                                           LivePhysRegs &LiveRegs,
                                           Register &TempSGPR,
                                           Optional<int> &FrameIndex,
                                           bool IsFP) {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  // We need to save and restore the current FP/BP.

  // 1: If there is already a VGPR with free lanes, use it. We
  // may already have to pay the penalty for spilling a CSR VGPR.
  if (MFI->haveFreeLanesForSGPRSpill(MF, 1)) {
    int NewFI = FrameInfo.CreateStackObject(4, Align(4), true, nullptr,
                                            TargetStackID::SGPRSpill);

    if (!MFI->allocateSGPRSpillToVGPR(MF, NewFI))
      llvm_unreachable("allocate SGPR spill should have worked");

    FrameIndex = NewFI;

    LLVM_DEBUG(auto Spill = MFI->getSGPRToVGPRSpills(NewFI).front();
               dbgs() << "Spilling " << (IsFP ? "FP" : "BP") << " to "
                      << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane
                      << '\n');
    return;
  }

  // 2: Next, try to save the FP/BP in an unused SGPR.
  TempSGPR = findScratchNonCalleeSaveRegister(
      MF.getRegInfo(), LiveRegs, AMDGPU::SReg_32_XM0_XEXECRegClass, true);

  if (!TempSGPR) {
    int NewFI = FrameInfo.CreateStackObject(4, Align(4), true, nullptr,
                                            TargetStackID::SGPRSpill);

    if (TRI->spillSGPRToVGPR() && MFI->allocateSGPRSpillToVGPR(MF, NewFI)) {
      // 3: There's no free lane to spill, and no free register to save FP/BP,
      // so we're forced to spill another VGPR to use for the spill.
      FrameIndex = NewFI;

      LLVM_DEBUG(
          auto Spill = MFI->getSGPRToVGPRSpills(NewFI).front();
          dbgs() << (IsFP ? "FP" : "BP") << " requires fallback spill to "
                 << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane << '\n';);
    } else {
      // Remove dead <NewFI> index
      MF.getFrameInfo().RemoveStackObject(NewFI);
      // 4: If all else fails, spill the FP/BP to memory.
      FrameIndex = FrameInfo.CreateSpillStackObject(4, Align(4));
      LLVM_DEBUG(dbgs() << "Reserved FI " << FrameIndex << " for spilling "
                        << (IsFP ? "FP" : "BP") << '\n');
    }
  } else {
    LLVM_DEBUG(dbgs() << "Saving " << (IsFP ? "FP" : "BP") << " with copy to "
                      << printReg(TempSGPR, TRI) << '\n');
  }
}

// We need to emit the stack operations specially here because a different
// frame register is used than the one getFrameRegister would return for the
// rest of the function.
static void buildPrologSpill(const GCNSubtarget &ST, LivePhysRegs &LiveRegs,
                             MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I,
                             const SIInstrInfo *TII, Register SpillReg,
                             Register ScratchRsrcReg, Register SPReg, int FI) {
  MachineFunction *MF = MBB.getParent();
  MachineFrameInfo &MFI = MF->getFrameInfo();

  int64_t Offset = MFI.getObjectOffset(FI);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore, 4,
      MFI.getObjectAlign(FI));

  if (ST.enableFlatScratch()) {
    if (TII->isLegalFLATOffset(Offset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::SCRATCH_STORE_DWORD_SADDR))
          .addReg(SpillReg, RegState::Kill)
          .addReg(SPReg)
          .addImm(Offset)
          .addImm(0) // glc
          .addImm(0) // slc
          .addImm(0) // dlc
          .addImm(0) // scc
          .addMemOperand(MMO);
      return;
    }
  } else if (SIInstrInfo::isLegalMUBUFImmOffset(Offset)) {
    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::BUFFER_STORE_DWORD_OFFSET))
        .addReg(SpillReg, RegState::Kill)
        .addReg(ScratchRsrcReg)
        .addReg(SPReg)
        .addImm(Offset)
        .addImm(0) // glc
        .addImm(0) // slc
        .addImm(0) // tfe
        .addImm(0) // dlc
        .addImm(0) // swz
        .addImm(0) // scc
        .addMemOperand(MMO);
    return;
  }

  // Don't clobber the TmpVGPR if we also need a scratch reg for the stack
  // offset in the spill.
  LiveRegs.addReg(SpillReg);

  if (ST.enableFlatScratch()) {
    MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister(
        MF->getRegInfo(), LiveRegs, AMDGPU::SReg_32_XM0RegClass);

    bool HasOffsetReg = OffsetReg;
    if (!HasOffsetReg) {
      // No free register, use stack pointer and restore afterwards.
      OffsetReg = SPReg;
    }

    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_ADD_U32), OffsetReg)
        .addReg(SPReg)
        .addImm(Offset);

    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::SCRATCH_STORE_DWORD_SADDR))
        .addReg(SpillReg, RegState::Kill)
        .addReg(OffsetReg, HasOffsetReg ? RegState::Kill : 0)
        .addImm(0) // offset
        .addImm(0) // glc
        .addImm(0) // slc
        .addImm(0) // dlc
        .addImm(0) // scc
        .addMemOperand(MMO);

    if (!HasOffsetReg) {
      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_SUB_U32), OffsetReg)
          .addReg(SPReg)
          .addImm(Offset);
    }
  } else {
    MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister(
        MF->getRegInfo(), LiveRegs, AMDGPU::VGPR_32RegClass);

    if (OffsetReg) {
      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::V_MOV_B32_e32), OffsetReg)
          .addImm(Offset);

      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::BUFFER_STORE_DWORD_OFFEN))
          .addReg(SpillReg, RegState::Kill)
          .addReg(OffsetReg, RegState::Kill)
          .addReg(ScratchRsrcReg)
          .addReg(SPReg)
          .addImm(0) // offset
          .addImm(0) // glc
          .addImm(0) // slc
          .addImm(0) // tfe
          .addImm(0) // dlc
          .addImm(0) // swz
          .addImm(0) // scc
          .addMemOperand(MMO);
    } else {
      // No free register, use stack pointer and restore afterwards.
      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_ADD_U32), SPReg)
          .addReg(SPReg)
          .addImm(Offset);

      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::BUFFER_STORE_DWORD_OFFSET))
          .addReg(SpillReg, RegState::Kill)
          .addReg(ScratchRsrcReg)
          .addReg(SPReg)
          .addImm(0) // offset
          .addImm(0) // glc
          .addImm(0) // slc
          .addImm(0) // tfe
          .addImm(0) // dlc
          .addImm(0) // swz
          .addImm(0) // scc
          .addMemOperand(MMO);

      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_SUB_U32), SPReg)
          .addReg(SPReg)
          .addImm(Offset);
    }
  }

  LiveRegs.removeReg(SpillReg);
}

static void buildEpilogReload(const GCNSubtarget &ST, LivePhysRegs &LiveRegs,
                              MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I,
                              const SIInstrInfo *TII, Register SpillReg,
                              Register ScratchRsrcReg, Register SPReg, int FI) {
  MachineFunction *MF = MBB.getParent();
  MachineFrameInfo &MFI = MF->getFrameInfo();
  int64_t Offset = MFI.getObjectOffset(FI);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad, 4,
      MFI.getObjectAlign(FI));

  if (ST.enableFlatScratch()) {
    if (TII->isLegalFLATOffset(Offset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
      BuildMI(MBB, I, DebugLoc(),
              TII->get(AMDGPU::SCRATCH_LOAD_DWORD_SADDR), SpillReg)
          .addReg(SPReg)
          .addImm(Offset)
          .addImm(0) // glc
          .addImm(0) // slc
          .addImm(0) // dlc
          .addImm(0) // scc
          .addMemOperand(MMO);
      return;
    }
    MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister(
        MF->getRegInfo(), LiveRegs, AMDGPU::SReg_32_XM0RegClass);
    if (!OffsetReg)
      report_fatal_error("failed to find free scratch register");

    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_ADD_U32), OffsetReg)
        .addReg(SPReg)
        .addImm(Offset);
    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::SCRATCH_LOAD_DWORD_SADDR),
            SpillReg)
        .addReg(OffsetReg, RegState::Kill)
        .addImm(0)
        .addImm(0) // glc
        .addImm(0) // slc
        .addImm(0) // dlc
        .addImm(0) // scc
        .addMemOperand(MMO);
    return;
  }

  if (SIInstrInfo::isLegalMUBUFImmOffset(Offset)) {
    BuildMI(MBB, I, DebugLoc(),
            TII->get(AMDGPU::BUFFER_LOAD_DWORD_OFFSET), SpillReg)
        .addReg(ScratchRsrcReg)
        .addReg(SPReg)
        .addImm(Offset)
        .addImm(0) // glc
        .addImm(0) // slc
        .addImm(0) // tfe
        .addImm(0) // dlc
        .addImm(0) // swz
        .addImm(0) // scc
        .addMemOperand(MMO);
    return;
  }

  MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister(
      MF->getRegInfo(), LiveRegs, AMDGPU::VGPR_32RegClass);
  if (!OffsetReg)
    report_fatal_error("failed to find free scratch register");

  BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::V_MOV_B32_e32), OffsetReg)
      .addImm(Offset);

  BuildMI(MBB, I, DebugLoc(),
          TII->get(AMDGPU::BUFFER_LOAD_DWORD_OFFEN), SpillReg)
      .addReg(OffsetReg, RegState::Kill)
      .addReg(ScratchRsrcReg)
      .addReg(SPReg)
      .addImm(0)
      .addImm(0) // glc
      .addImm(0) // slc
      .addImm(0) // tfe
      .addImm(0) // dlc
      .addImm(0) // swz
      .addImm(0) // scc
      .addMemOperand(MMO);
}

static void buildGitPtr(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, const SIInstrInfo *TII,
                        Register TargetReg) {
  MachineFunction *MF = MBB.getParent();
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);
  Register TargetLo = TRI->getSubReg(TargetReg, AMDGPU::sub0);
  Register TargetHi = TRI->getSubReg(TargetReg, AMDGPU::sub1);

  if (MFI->getGITPtrHigh() != 0xffffffff) {
    BuildMI(MBB, I, DL, SMovB32, TargetHi)
        .addImm(MFI->getGITPtrHigh())
        .addReg(TargetReg, RegState::ImplicitDefine);
  } else {
    const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64);
    BuildMI(MBB, I, DL, GetPC64, TargetReg);
  }
  Register GitPtrLo = MFI->getGITPtrLoReg(*MF);
  MF->getRegInfo().addLiveIn(GitPtrLo);
  MBB.addLiveIn(GitPtrLo);
  BuildMI(MBB, I, DL, SMovB32, TargetLo)
      .addReg(GitPtrLo);
}

// Emit flat scratch setup code, assuming `MFI->hasFlatScratchInit()`
void SIFrameLowering::emitEntryFunctionFlatScratchInit(
    MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    const DebugLoc &DL, Register ScratchWaveOffsetReg) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // We don't need this if we only have spills since there is no user facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.
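  //
  // In outline: on PAL the flat scratch base is read out of the scratch
  // descriptor found through the GIT; otherwise it arrives preloaded in the
  // FLAT_SCRATCH_INIT SGPR pair. The per-wave scratch offset is then folded
  // in, and the result is written to the hardware flat scratch registers
  // (via S_SETREG on GFX10+).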
  Register FlatScrInitLo;
  Register FlatScrInitHi;

  if (ST.isAmdPalOS()) {
    // Extract the scratch offset from the descriptor in the GIT
    LivePhysRegs LiveRegs;
    LiveRegs.init(*TRI);
    LiveRegs.addLiveIns(MBB);

    // Find unused reg to load flat scratch init into
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register FlatScrInit = AMDGPU::NoRegister;
    ArrayRef<MCPhysReg> AllSGPR64s = TRI->getAllSGPR64(MF);
    unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 1) / 2;
    AllSGPR64s = AllSGPR64s.slice(
        std::min(static_cast<unsigned>(AllSGPR64s.size()), NumPreloaded));
    Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
    for (MCPhysReg Reg : AllSGPR64s) {
      if (LiveRegs.available(MRI, Reg) && MRI.isAllocatable(Reg) &&
          !TRI->isSubRegisterEq(Reg, GITPtrLoReg)) {
        FlatScrInit = Reg;
        break;
      }
    }
    assert(FlatScrInit && "Failed to find free register for scratch init");

    FlatScrInitLo = TRI->getSubReg(FlatScrInit, AMDGPU::sub0);
    FlatScrInitHi = TRI->getSubReg(FlatScrInit, AMDGPU::sub1);

    buildGitPtr(MBB, I, DL, TII, FlatScrInit);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
    const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);
    auto *MMO = MF.getMachineMemOperand(
        PtrInfo,
        MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
            MachineMemOperand::MODereferenceable,
        8, Align(4));
    unsigned Offset =
        MF.getFunction().getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
    unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset);
    BuildMI(MBB, I, DL, LoadDwordX2, FlatScrInit)
        .addReg(FlatScrInit)
        .addImm(EncodedOffset) // offset
        .addImm(0) // glc
        .addImm(0) // dlc
        .addMemOperand(MMO);

    // Mask the offset in [47:0] of the descriptor
    const MCInstrDesc &SAndB32 = TII->get(AMDGPU::S_AND_B32);
    BuildMI(MBB, I, DL, SAndB32, FlatScrInitHi)
        .addReg(FlatScrInitHi)
        .addImm(0xffff);
  } else {
    Register FlatScratchInitReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);
    assert(FlatScratchInitReg);

    MachineRegisterInfo &MRI = MF.getRegInfo();
    MRI.addLiveIn(FlatScratchInitReg);
    MBB.addLiveIn(FlatScratchInitReg);

    FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
    FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);
  }

  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
          .addReg(FlatScrInitLo)
          .addReg(ScratchWaveOffsetReg);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), FlatScrInitHi)
          .addReg(FlatScrInitHi)
          .addImm(0);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32)).
        addReg(FlatScrInitLo).
        addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_LO |
                       (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32)).
        addReg(FlatScrInitHi).
        addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_HI |
                       (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
      return;
    }

    // For GFX9.
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
        .addReg(FlatScrInitLo)
        .addReg(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
        .addReg(FlatScrInitHi)
        .addImm(0);

    return;
  }

  assert(ST.getGeneration() < AMDGPUSubtarget::GFX9);

  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitHi, RegState::Kill);

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
      .addReg(FlatScrInitLo, RegState::Kill)
      .addImm(8);
}

// Note SGPRSpill stack IDs should only be used for SGPR spilling to VGPRs, not
// memory. They should have been removed by now.
static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I))
      return false;
  }

  return true;
}

// Shift down registers reserved for the scratch RSRC.
Register SIFrameLowering::getEntryFunctionReservedScratchRsrcReg(
    MachineFunction &MF) const {

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  assert(MFI->isEntryFunction());

  Register ScratchRsrcReg = MFI->getScratchRSrcReg();

  if (!ScratchRsrcReg || (!MRI.isPhysRegUsed(ScratchRsrcReg) &&
                          allStackObjectsAreDead(MF.getFrameInfo())))
    return Register();

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift it down to the end of those
  // which were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. We only
  // cannot do this for the resources required for scratch access. For now we
  // skip over user SGPRs and may leave unused holes.

  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = TRI->getAllSGPR128(MF);
  AllSGPR128s = AllSGPR128s.slice(
      std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed. Also for PAL, make sure we don't clobber
    // the GIT pointer passed in SGPR0 or SGPR8.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) &&
        !TRI->isSubRegisterEq(Reg, GITPtrLoReg)) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

// Frame sizes are per-lane byte counts. With flat scratch the SGPR stack
// offsets are also per-lane, so no scaling is needed; with MUBUF scratch the
// SGPR offset addresses the wave's swizzled scratch allocation, so sizes must
// be scaled by the wavefront size.
static unsigned getScratchScaleFactor(const GCNSubtarget &ST) {
  return ST.enableFlatScratch() ? 1 : ST.getWavefrontSize();
}

void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
                                                MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  // FIXME: If we only have SGPR spills, we won't actually be using scratch
  // memory since these spill to VGPRs. We should be cleaning up these unused
  // SGPR spill frame indices somewhere.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.

  // FIXME: Remove all of the isPhysRegUsed checks

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();

  assert(MFI->isEntryFunction());

  Register PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
      AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
  // FIXME: Hack to not crash in situations which emitted an error.
  if (!PreloadedScratchWaveOffsetReg)
    return;

  // We need to do the replacement of the private segment buffer register even
  // if there are no stack objects. There could be stores to undef or a
  // constant without an associated object.
  //
  // This will return `Register()` in cases where there are no actual
  // uses of the SRSRC.
  Register ScratchRsrcReg;
  if (!ST.enableFlatScratch())
    ScratchRsrcReg = getEntryFunctionReservedScratchRsrcReg(MF);

  // Make the selected register live throughout the function.
  if (ScratchRsrcReg) {
    for (MachineBasicBlock &OtherBB : MF) {
      if (&OtherBB != &MBB) {
        OtherBB.addLiveIn(ScratchRsrcReg);
      }
    }
  }

  // Now that we have fixed the reserved SRSRC we need to locate the
  // (potentially) preloaded SRSRC.
  Register PreloadedScratchRsrcReg;
  if (ST.isAmdHsaOrMesa(F)) {
    PreloadedScratchRsrcReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
    if (ScratchRsrcReg && PreloadedScratchRsrcReg) {
      // We added live-ins during argument lowering, but since they were not
      // used they were deleted. We're adding the uses now, so add them back.
      MRI.addLiveIn(PreloadedScratchRsrcReg);
      MBB.addLiveIn(PreloadedScratchRsrcReg);
    }
  }

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // We found the SRSRC first because it needs four registers and has an
  // alignment requirement. If the SRSRC that we found clobbers the scratch
  // wave offset, which may be in a fixed SGPR or a free SGPR chosen by
  // SITargetLowering::allocateSystemSGPRs, COPY the scratch wave offset to a
  // free SGPR.
  Register ScratchWaveOffsetReg;
  if (TRI->isSubRegisterEq(ScratchRsrcReg, PreloadedScratchWaveOffsetReg)) {
    ArrayRef<MCPhysReg> AllSGPRs = TRI->getAllSGPR32(MF);
    unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();
    AllSGPRs = AllSGPRs.slice(
        std::min(static_cast<unsigned>(AllSGPRs.size()), NumPreloaded));
    Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
    for (MCPhysReg Reg : AllSGPRs) {
      if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) &&
          !TRI->isSubRegisterEq(ScratchRsrcReg, Reg) && GITPtrLoReg != Reg) {
        ScratchWaveOffsetReg = Reg;
        BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
            .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill);
        break;
      }
    }
  } else {
    ScratchWaveOffsetReg = PreloadedScratchWaveOffsetReg;
  }
  assert(ScratchWaveOffsetReg);

  if (requiresStackPointerReference(MF)) {
    Register SPReg = MFI->getStackPtrOffsetReg();
    assert(SPReg != AMDGPU::SP_REG);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), SPReg)
        .addImm(MF.getFrameInfo().getStackSize() * getScratchScaleFactor(ST));
  }

  if (hasFP(MF)) {
    Register FPReg = MFI->getFrameOffsetReg();
    assert(FPReg != AMDGPU::FP_REG);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), FPReg).addImm(0);
  }

  if (MFI->hasFlatScratchInit() || ScratchRsrcReg) {
    MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
    MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
  }

  if (MFI->hasFlatScratchInit()) {
    emitEntryFunctionFlatScratchInit(MF, MBB, I, DL, ScratchWaveOffsetReg);
  }

  if (ScratchRsrcReg) {
    emitEntryFunctionScratchRsrcRegSetup(MF, MBB, I, DL,
                                         PreloadedScratchRsrcReg,
                                         ScratchRsrcReg, ScratchWaveOffsetReg);
  }
}

// Emit scratch RSRC setup code, assuming `ScratchRsrcReg != AMDGPU::NoReg`
void SIFrameLowering::emitEntryFunctionScratchRsrcRegSetup(
    MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    const DebugLoc &DL, Register PreloadedScratchRsrcReg,
    Register ScratchRsrcReg, Register ScratchWaveOffsetReg) const {

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const Function &Fn = MF.getFunction();

  if (ST.isAmdPalOS()) {
    // The pointer to the GIT is formed from the offset passed in and either
    // the amdgpu-git-ptr-high function attribute or the top part of the PC
    Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

    buildGitPtr(MBB, I, DL, TII, Rsrc01);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
    const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
    auto MMO = MF.getMachineMemOperand(PtrInfo,
                                       MachineMemOperand::MOLoad |
                                           MachineMemOperand::MOInvariant |
                                           MachineMemOperand::MODereferenceable,
                                       16, Align(4));
    unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
    unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset);
    BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg)
        .addReg(Rsrc01)
        .addImm(EncodedOffset) // offset
        .addImm(0) // glc
        .addImm(0) // dlc
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine)
        .addMemOperand(MMO);
  } else if (ST.isMesaGfxShader(Fn) || !PreloadedScratchRsrcReg) {
    assert(!ST.isAmdHsaOrMesa(Fn));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    Register Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    Register Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and setup the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();

    if (MFI->hasImplicitBufferPtr()) {
      Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
            .addReg(MFI->getImplicitBufferPtrUserSGPR())
            .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
        auto MMO = MF.getMachineMemOperand(
            PtrInfo,
            MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
                MachineMemOperand::MODereferenceable,
            8, Align(4));
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
            .addReg(MFI->getImplicitBufferPtrUserSGPR())
            .addImm(0) // offset
            .addImm(0) // glc
            .addImm(0) // dlc
            .addMemOperand(MMO)
            .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

        MF.getRegInfo().addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
        MBB.addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
      }
    } else {
      Register Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      Register Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
          .addExternalSymbol("SCRATCH_RSRC_DWORD0")
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
          .addExternalSymbol("SCRATCH_RSRC_DWORD1")
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
        .addImm(Rsrc23 & 0xffffffff)
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
        .addImm(Rsrc23 >> 32)
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  } else if (ST.isAmdHsaOrMesa(Fn)) {
    assert(PreloadedScratchRsrcReg);

    if (ScratchRsrcReg != PreloadedScratchRsrcReg) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
          .addReg(PreloadedScratchRsrcReg, RegState::Kill);
    }
  }

  // Add the scratch wave offset into the scratch RSRC.
  //
  // We only want to update the first 48 bits, which is the base address
  // pointer, without touching the adjacent 16 bits of flags. We know this add
  // cannot carry-out from bit 47, otherwise the scratch allocation would be
  // impossible to fit in the 48-bit global address space.
  //
  // TODO: Evaluate if it is better to just construct an SRD using the flat
  // scratch init and some constants rather than update the one we are passed.
  Register ScratchRsrcSub0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
  Register ScratchRsrcSub1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

  // We cannot Kill ScratchWaveOffsetReg here because we allow it to be used in
  // the kernel body via inreg arguments.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), ScratchRsrcSub0)
      .addReg(ScratchRsrcSub0)
      .addReg(ScratchWaveOffsetReg)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), ScratchRsrcSub1)
      .addReg(ScratchRsrcSub1)
      .addImm(0)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
}

bool SIFrameLowering::isSupportedStackID(TargetStackID::Value ID) const {
  switch (ID) {
  case TargetStackID::Default:
  case TargetStackID::NoAlloc:
  case TargetStackID::SGPRSpill:
    return true;
  case TargetStackID::ScalableVector:
    return false;
  }
  llvm_unreachable("Invalid TargetStackID::Value");
}

// Activate all lanes, returns saved exec.
static Register buildScratchExecCopy(LivePhysRegs &LiveRegs,
                                     MachineFunction &MF,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     bool IsProlog) {
  Register ScratchExecCopy;
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  DebugLoc DL;

  if (LiveRegs.empty()) {
    if (IsProlog) {
      LiveRegs.init(TRI);
      LiveRegs.addLiveIns(MBB);
      if (FuncInfo->SGPRForFPSaveRestoreCopy)
        LiveRegs.removeReg(FuncInfo->SGPRForFPSaveRestoreCopy);

      if (FuncInfo->SGPRForBPSaveRestoreCopy)
        LiveRegs.removeReg(FuncInfo->SGPRForBPSaveRestoreCopy);
    } else {
      // In epilog.
      LiveRegs.init(*ST.getRegisterInfo());
      LiveRegs.addLiveOuts(MBB);
      LiveRegs.stepBackward(*MBBI);
    }
  }

  ScratchExecCopy = findScratchNonCalleeSaveRegister(
      MRI, LiveRegs, *TRI.getWaveMaskRegClass());
  if (!ScratchExecCopy)
    report_fatal_error("failed to find free scratch register");

  if (!IsProlog)
    LiveRegs.removeReg(ScratchExecCopy);

  const unsigned OrSaveExec =
      ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64;
  BuildMI(MBB, MBBI, DL, TII->get(OrSaveExec), ScratchExecCopy).addImm(-1);

  return ScratchExecCopy;
}

void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction()) {
    emitEntryFunctionPrologue(MF, MBB);
    return;
  }

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  Register StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  Register FramePtrReg = FuncInfo->getFrameOffsetReg();
  Register BasePtrReg =
      TRI.hasBasePointer(MF) ? TRI.getBaseRegister() : Register();
  LivePhysRegs LiveRegs;

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL;

  bool HasFP = false;
  bool HasBP = false;
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = NumBytes;
  // To avoid clobbering VGPRs in lanes that weren't active on function entry,
  // turn on all lanes before doing the spill to memory.
  Register ScratchExecCopy;

  bool HasFPSaveIndex = FuncInfo->FramePointerSaveIndex.hasValue();
  bool SpillFPToMemory = false;
  // A StackID of SGPRSpill implies that this is a spill from SGPR to VGPR.
  // Otherwise we are spilling the FP to memory.
  if (HasFPSaveIndex) {
    SpillFPToMemory = MFI.getStackID(*FuncInfo->FramePointerSaveIndex) !=
                      TargetStackID::SGPRSpill;
  }

  bool HasBPSaveIndex = FuncInfo->BasePointerSaveIndex.hasValue();
  bool SpillBPToMemory = false;
  // A StackID of SGPRSpill implies that this is a spill from SGPR to VGPR.
  // Otherwise we are spilling the BP to memory.
  if (HasBPSaveIndex) {
    SpillBPToMemory = MFI.getStackID(*FuncInfo->BasePointerSaveIndex) !=
                      TargetStackID::SGPRSpill;
  }

  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;

    if (!ScratchExecCopy)
      ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, true);

    buildPrologSpill(ST, LiveRegs, MBB, MBBI, TII, Reg.VGPR,
                     FuncInfo->getScratchRSrcReg(),
                     StackPtrReg,
                     Reg.FI.getValue());
  }

  if (HasFPSaveIndex && SpillFPToMemory) {
    assert(!MFI.isDeadObjectIndex(FuncInfo->FramePointerSaveIndex.getValue()));

    if (!ScratchExecCopy)
      ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, true);

    MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister(
        MRI, LiveRegs, AMDGPU::VGPR_32RegClass);
    if (!TmpVGPR)
      report_fatal_error("failed to find free scratch register");

    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
        .addReg(FramePtrReg);

    buildPrologSpill(ST, LiveRegs, MBB, MBBI, TII, TmpVGPR,
                     FuncInfo->getScratchRSrcReg(), StackPtrReg,
                     FuncInfo->FramePointerSaveIndex.getValue());
  }

  if (HasBPSaveIndex && SpillBPToMemory) {
    assert(!MFI.isDeadObjectIndex(*FuncInfo->BasePointerSaveIndex));

    if (!ScratchExecCopy)
      ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, true);

    MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister(
        MRI, LiveRegs, AMDGPU::VGPR_32RegClass);
    if (!TmpVGPR)
      report_fatal_error("failed to find free scratch register");

    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
        .addReg(BasePtrReg);

    buildPrologSpill(ST, LiveRegs, MBB, MBBI, TII, TmpVGPR,
                     FuncInfo->getScratchRSrcReg(), StackPtrReg,
                     *FuncInfo->BasePointerSaveIndex);
  }

  if (ScratchExecCopy) {
    // FIXME: Split block and make terminator.
    unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    MCRegister Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec)
        .addReg(ScratchExecCopy, RegState::Kill);
    LiveRegs.addReg(ScratchExecCopy);
  }

  // In this case, spill the FP to a reserved VGPR.
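  // (That is, to an SGPR-to-VGPR spill lane reserved earlier by
  // getVGPRSpillLaneOrTempRegister.)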
  if (HasFPSaveIndex && !SpillFPToMemory) {
    const int FI = FuncInfo->FramePointerSaveIndex.getValue();
    assert(!MFI.isDeadObjectIndex(FI));

    assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill);
    ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill =
        FuncInfo->getSGPRToVGPRSpills(FI);
    assert(Spill.size() == 1);

    // Save FP before setting it up.
    // FIXME: This should respect spillSGPRToVGPR;
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill[0].VGPR)
        .addReg(FramePtrReg)
        .addImm(Spill[0].Lane)
        .addReg(Spill[0].VGPR, RegState::Undef);
  }

  // In this case, spill the BP to a reserved VGPR.
  if (HasBPSaveIndex && !SpillBPToMemory) {
    const int BasePtrFI = *FuncInfo->BasePointerSaveIndex;
    assert(!MFI.isDeadObjectIndex(BasePtrFI));

    assert(MFI.getStackID(BasePtrFI) == TargetStackID::SGPRSpill);
    ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill =
        FuncInfo->getSGPRToVGPRSpills(BasePtrFI);
    assert(Spill.size() == 1);

    // Save BP before setting it up.
    // FIXME: This should respect spillSGPRToVGPR;
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill[0].VGPR)
        .addReg(BasePtrReg)
        .addImm(Spill[0].Lane)
        .addReg(Spill[0].VGPR, RegState::Undef);
  }

  // Emit the copy if we need an FP, and are using a free SGPR to save it.
  if (FuncInfo->SGPRForFPSaveRestoreCopy) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY),
            FuncInfo->SGPRForFPSaveRestoreCopy)
        .addReg(FramePtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Emit the copy if we need a BP, and are using a free SGPR to save it.
  if (FuncInfo->SGPRForBPSaveRestoreCopy) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY),
            FuncInfo->SGPRForBPSaveRestoreCopy)
        .addReg(BasePtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // If a copy has been emitted for FP and/or BP, make the SGPRs used in the
  // copy instructions live throughout the function.
  SmallVector<MCPhysReg, 2> TempSGPRs;
  if (FuncInfo->SGPRForFPSaveRestoreCopy)
    TempSGPRs.push_back(FuncInfo->SGPRForFPSaveRestoreCopy);

  if (FuncInfo->SGPRForBPSaveRestoreCopy)
    TempSGPRs.push_back(FuncInfo->SGPRForBPSaveRestoreCopy);

  if (!TempSGPRs.empty()) {
    for (MachineBasicBlock &MBB : MF) {
      for (MCPhysReg Reg : TempSGPRs)
        MBB.addLiveIn(Reg);

      MBB.sortUniqueLiveIns();
    }
    if (!LiveRegs.empty()) {
      LiveRegs.addReg(FuncInfo->SGPRForFPSaveRestoreCopy);
      LiveRegs.addReg(FuncInfo->SGPRForBPSaveRestoreCopy);
    }
  }

  if (TRI.needsStackRealignment(MF)) {
    HasFP = true;
    const unsigned Alignment = MFI.getMaxAlign().value();

    RoundedSize += Alignment;
    if (LiveRegs.empty()) {
      LiveRegs.init(TRI);
      LiveRegs.addLiveIns(MBB);
    }

    // s_add_u32 s33, s32, NumBytes
    // s_and_b32 s33, s33, 0b111...0000
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), FramePtrReg)
        .addReg(StackPtrReg)
        .addImm((Alignment - 1) * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_AND_B32), FramePtrReg)
        .addReg(FramePtrReg, RegState::Kill)
        .addImm(-Alignment * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
    FuncInfo->setIsStackRealigned(true);
  } else if ((HasFP = hasFP(MF))) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
        .addReg(StackPtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // If we need a base pointer, set it up here. It's whatever the value of
  // the stack pointer is at this point. Any variable size objects will be
  // allocated after this, so we can still use the base pointer to reference
  // the incoming arguments.
  if ((HasBP = TRI.hasBasePointer(MF))) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), BasePtrReg)
        .addReg(StackPtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  if (HasFP && RoundedSize != 0) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), StackPtrReg)
        .addReg(StackPtrReg)
        .addImm(RoundedSize * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
  }

  assert((!HasFP || (FuncInfo->SGPRForFPSaveRestoreCopy ||
                     FuncInfo->FramePointerSaveIndex)) &&
         "Needed to save FP but didn't save it anywhere");

  assert((HasFP || (!FuncInfo->SGPRForFPSaveRestoreCopy &&
                    !FuncInfo->FramePointerSaveIndex)) &&
         "Saved FP but didn't need it");

  assert((!HasBP || (FuncInfo->SGPRForBPSaveRestoreCopy ||
                     FuncInfo->BasePointerSaveIndex)) &&
         "Needed to save BP but didn't save it anywhere");

  assert((HasBP || (!FuncInfo->SGPRForBPSaveRestoreCopy &&
                    !FuncInfo->BasePointerSaveIndex)) &&
         "Saved BP but didn't need it");
}

void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  LivePhysRegs LiveRegs;
  DebugLoc DL;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = FuncInfo->isStackRealigned()
                             ? NumBytes + MFI.getMaxAlign().value()
                             : NumBytes;
  const Register StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  const Register FramePtrReg = FuncInfo->getFrameOffsetReg();
  const Register BasePtrReg =
      TRI.hasBasePointer(MF) ? TRI.getBaseRegister() : Register();
  bool HasFPSaveIndex = FuncInfo->FramePointerSaveIndex.hasValue();
  bool SpillFPToMemory = false;
  if (HasFPSaveIndex) {
    SpillFPToMemory = MFI.getStackID(*FuncInfo->FramePointerSaveIndex) !=
                      TargetStackID::SGPRSpill;
  }

  bool HasBPSaveIndex = FuncInfo->BasePointerSaveIndex.hasValue();
  bool SpillBPToMemory = false;
  if (HasBPSaveIndex) {
    SpillBPToMemory = MFI.getStackID(*FuncInfo->BasePointerSaveIndex) !=
                      TargetStackID::SGPRSpill;
  }

  if (RoundedSize != 0 && hasFP(MF)) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_SUB_U32), StackPtrReg)
        .addReg(StackPtrReg)
        .addImm(RoundedSize * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameDestroy);
  }

  if (FuncInfo->SGPRForFPSaveRestoreCopy) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
        .addReg(FuncInfo->SGPRForFPSaveRestoreCopy)
        .setMIFlag(MachineInstr::FrameDestroy);
  }

  if (FuncInfo->SGPRForBPSaveRestoreCopy) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), BasePtrReg)
        .addReg(FuncInfo->SGPRForBPSaveRestoreCopy)
        .setMIFlag(MachineInstr::FrameDestroy);
  }

  Register ScratchExecCopy;
  if (HasFPSaveIndex) {
    const int FI = FuncInfo->FramePointerSaveIndex.getValue();
    assert(!MFI.isDeadObjectIndex(FI));
    if (SpillFPToMemory) {
      if (!ScratchExecCopy)
        ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, false);

      MCPhysReg TempVGPR = findScratchNonCalleeSaveRegister(
          MRI, LiveRegs, AMDGPU::VGPR_32RegClass);
      if (!TempVGPR)
        report_fatal_error("failed to find free scratch register");
      buildEpilogReload(ST, LiveRegs, MBB, MBBI, TII, TempVGPR,
                        FuncInfo->getScratchRSrcReg(), StackPtrReg, FI);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), FramePtrReg)
          .addReg(TempVGPR, RegState::Kill);
    } else {
      // Reload from VGPR spill.
      assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill);
      ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill =
          FuncInfo->getSGPRToVGPRSpills(FI);
      assert(Spill.size() == 1);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READLANE_B32), FramePtrReg)
          .addReg(Spill[0].VGPR)
          .addImm(Spill[0].Lane);
    }
  }

  if (HasBPSaveIndex) {
    const int BasePtrFI = *FuncInfo->BasePointerSaveIndex;
    assert(!MFI.isDeadObjectIndex(BasePtrFI));
    if (SpillBPToMemory) {
      if (!ScratchExecCopy)
        ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, false);

      MCPhysReg TempVGPR = findScratchNonCalleeSaveRegister(
          MRI, LiveRegs, AMDGPU::VGPR_32RegClass);
      if (!TempVGPR)
        report_fatal_error("failed to find free scratch register");
      buildEpilogReload(ST, LiveRegs, MBB, MBBI, TII, TempVGPR,
                        FuncInfo->getScratchRSrcReg(), StackPtrReg, BasePtrFI);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), BasePtrReg)
          .addReg(TempVGPR, RegState::Kill);
    } else {
      // Reload from VGPR spill.
      assert(MFI.getStackID(BasePtrFI) == TargetStackID::SGPRSpill);
      ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill =
          FuncInfo->getSGPRToVGPRSpills(BasePtrFI);
      assert(Spill.size() == 1);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READLANE_B32), BasePtrReg)
          .addReg(Spill[0].VGPR)
          .addImm(Spill[0].Lane);
    }
  }

  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg :
       FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;

    if (!ScratchExecCopy)
      ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, false);

    buildEpilogReload(ST, LiveRegs, MBB, MBBI, TII, Reg.VGPR,
                      FuncInfo->getScratchRSrcReg(), StackPtrReg,
                      Reg.FI.getValue());
  }

  if (ScratchExecCopy) {
    // FIXME: Split block and make terminator.
    unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    MCRegister Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec)
        .addReg(ScratchExecCopy, RegState::Kill);
  }
}

#ifndef NDEBUG
static bool allSGPRSpillsAreDead(const MachineFunction &MF) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I) &&
        MFI.getStackID(I) == TargetStackID::SGPRSpill &&
        (I != FuncInfo->FramePointerSaveIndex &&
         I != FuncInfo->BasePointerSaveIndex)) {
      return false;
    }
  }

  return true;
}
#endif

StackOffset SIFrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                    int FI,
                                                    Register &FrameReg) const {
  const SIRegisterInfo *RI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();

  FrameReg = RI->getFrameRegister(MF);
  return StackOffset::getFixed(MF.getFrameInfo().getObjectOffset(FI));
}

void SIFrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF,
    RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();

  FuncInfo->removeDeadFrameIndices(MFI);
  assert(allSGPRSpillsAreDead(MF) &&
         "SGPR spill should have been removed in SILowerSGPRSpills");

  // FIXME: The other checks should be redundant with allStackObjectsAreDead,
  // but currently hasNonSpillStackObjects is set only from source
  // allocas. Stack temps produced from legalization are not counted currently.
  if (!allStackObjectsAreDead(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    if (FuncInfo->isEntryFunction()) {
      int ScavengeFI = MFI.CreateFixedObject(
          TRI->getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
      RS->addScavengingFrameIndex(ScavengeFI);
    } else {
      int ScavengeFI = MFI.CreateStackObject(
          TRI->getSpillSize(AMDGPU::SGPR_32RegClass),
          TRI->getSpillAlign(AMDGPU::SGPR_32RegClass), false);
      RS->addScavengingFrameIndex(ScavengeFI);
    }
  }
}

// Only report VGPRs to generic code.
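// SGPR callee saves are handled separately (see determineCalleeSavesSGPR and
// the FP/BP save logic above), so only vector registers are left in the mask
// the generic spill/restore code sees.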
void SIFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                           BitVector &SavedVGPRs,
                                           RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedVGPRs, RS);
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  if (MFI->isEntryFunction())
    return;

  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  // Ignore the SGPRs the default implementation found.
  SavedVGPRs.clearBitsNotInMask(TRI->getAllVectorRegMask());

  // Do not save AGPRs prior to GFX90A because there was no easy way to do so.
  // gfx908 has no AGPR loads and stores, so spilling an AGPR also requires a
  // temporary VGPR.
  if (!ST.hasGFX90AInsts())
    SavedVGPRs.clearBitsInMask(TRI->getAllAGPRRegMask());

  // hasFP only knows about stack objects that already exist. We're now
  // determining the stack slots that will be created, so we have to predict
  // them. Stack objects force FP usage with calls.
  //
  // Note a new VGPR CSR may be introduced if one is used for the spill, but we
  // don't want to report it here.
  //
  // FIXME: Is this really hasReservedCallFrame?
  const bool WillHaveFP =
      FrameInfo.hasCalls() &&
      (SavedVGPRs.any() || !allStackObjectsAreDead(FrameInfo));

  // VGPRs used for SGPR spilling need to be specially inserted in the prolog,
  // so don't allow the default insertion to handle them.
  for (auto SSpill : MFI->getSGPRSpillVGPRs())
    SavedVGPRs.reset(SSpill.VGPR);

  LivePhysRegs LiveRegs;
  LiveRegs.init(*TRI);

  if (WillHaveFP || hasFP(MF)) {
    assert(!MFI->SGPRForFPSaveRestoreCopy && !MFI->FramePointerSaveIndex &&
           "Re-reserving spill slot for FP");
    getVGPRSpillLaneOrTempRegister(MF, LiveRegs, MFI->SGPRForFPSaveRestoreCopy,
                                   MFI->FramePointerSaveIndex, true);
  }

  if (TRI->hasBasePointer(MF)) {
    if (MFI->SGPRForFPSaveRestoreCopy)
      LiveRegs.addReg(MFI->SGPRForFPSaveRestoreCopy);

    assert(!MFI->SGPRForBPSaveRestoreCopy &&
           !MFI->BasePointerSaveIndex && "Re-reserving spill slot for BP");
    getVGPRSpillLaneOrTempRegister(MF, LiveRegs, MFI->SGPRForBPSaveRestoreCopy,
                                   MFI->BasePointerSaveIndex, false);
  }
}

void SIFrameLowering::determineCalleeSavesSGPR(MachineFunction &MF,
                                               BitVector &SavedRegs,
                                               RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  if (MFI->isEntryFunction())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  // The SP is specifically managed and we don't want extra spills of it.
  SavedRegs.reset(MFI->getStackPtrOffsetReg());

  const BitVector AllSavedRegs = SavedRegs;
  SavedRegs.clearBitsInMask(TRI->getAllVectorRegMask());

  // If clearing VGPRs changed the mask, we will have some CSR VGPR spills.
  const bool HaveAnyCSRVGPR = SavedRegs != AllSavedRegs;

  // We have to anticipate introducing CSR VGPR spills if we don't have any
  // stack objects already, since we require an FP if there is a call and stack.
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  const bool WillHaveFP = FrameInfo.hasCalls() && HaveAnyCSRVGPR;

  // FP will be specially managed like SP.
  if (WillHaveFP || hasFP(MF))
    SavedRegs.reset(MFI->getFrameOffsetReg());
}

bool SIFrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return true; // Early exit if no callee saved registers are modified!

  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (!FuncInfo->SGPRForFPSaveRestoreCopy &&
      !FuncInfo->SGPRForBPSaveRestoreCopy)
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *RI = ST.getRegisterInfo();
  Register FramePtrReg = FuncInfo->getFrameOffsetReg();
  Register BasePtrReg = RI->getBaseRegister();
  unsigned NumModifiedRegs = 0;

  if (FuncInfo->SGPRForFPSaveRestoreCopy)
    NumModifiedRegs++;
  if (FuncInfo->SGPRForBPSaveRestoreCopy)
    NumModifiedRegs++;

  for (auto &CS : CSI) {
    if (CS.getReg() == FramePtrReg && FuncInfo->SGPRForFPSaveRestoreCopy) {
      CS.setDstReg(FuncInfo->SGPRForFPSaveRestoreCopy);
      if (--NumModifiedRegs)
        break;
    } else if (CS.getReg() == BasePtrReg &&
               FuncInfo->SGPRForBPSaveRestoreCopy) {
      CS.setDstReg(FuncInfo->SGPRForBPSaveRestoreCopy);
      if (--NumModifiedRegs)
        break;
    }
  }

  return false;
}

MachineBasicBlock::iterator SIFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF,
    MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  int64_t Amount = I->getOperand(0).getImm();
  if (Amount == 0)
    return MBB.erase(I);

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  if (!hasReservedCallFrame(MF)) {
    Amount = alignTo(Amount, getStackAlign());
    assert(isUInt<32>(Amount) && "exceeded stack address space size");
    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Register SPReg = MFI->getStackPtrOffsetReg();

    unsigned Op = IsDestroy ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
    BuildMI(MBB, I, DL, TII->get(Op), SPReg)
        .addReg(SPReg)
        .addImm(Amount * getScratchScaleFactor(ST));
  } else if (CalleePopAmount != 0) {
    llvm_unreachable("is this used?");
  }

  return MBB.erase(I);
}

/// Returns true if the frame will require a reference to the stack pointer.
///
/// This is the set of conditions common to setting up the stack pointer in a
/// kernel, and for using a frame pointer in a callable function.
///
/// FIXME: Should also check hasOpaqueSPAdjustment and if any inline asm
/// references SP.
static bool frameTriviallyRequiresSP(const MachineFrameInfo &MFI) {
  return MFI.hasVarSizedObjects() || MFI.hasStackMap() || MFI.hasPatchPoint();
}

// The FP for kernels is always known 0, so we never really need to setup an
// explicit register for it. However, DisableFramePointerElim will force us to
// use a register for it.
bool SIFrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // For entry functions we can use an immediate offset in most cases, so the
  // presence of calls doesn't imply we need a distinct frame pointer.
  if (MFI.hasCalls() &&
      !MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
    // All offsets are unsigned, so need to be addressed in the same direction
    // as stack growth.

    // FIXME: This function is pretty broken, since it can be called before the
    // frame layout is determined or CSR spills are inserted.
    return MFI.getStackSize() != 0;
  }

  return frameTriviallyRequiresSP(MFI) || MFI.isFrameAddressTaken() ||
         MF.getSubtarget<GCNSubtarget>().getRegisterInfo()->needsStackRealignment(
             MF) ||
         MF.getTarget().Options.DisableFramePointerElim(MF);
}

// This is essentially a reduced version of hasFP for entry functions. Since the
// stack pointer is known 0 on entry to kernels, we never really need an FP
// register. We may need to initialize the stack pointer depending on the frame
// properties, which logically overlaps many of the cases where an ordinary
// function would require an FP.
bool SIFrameLowering::requiresStackPointerReference(
    const MachineFunction &MF) const {
  // Callable functions always require a stack pointer reference.
  assert(MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction() &&
         "only expected to call this for entry points");

  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Entry points ordinarily don't need to initialize SP. We have to set it up
  // for callees if there are any. Also note tail calls are impossible/don't
  // make any sense for kernels.
  if (MFI.hasCalls())
    return true;

  // We still need to initialize the SP if we're doing anything weird that
  // references the SP, like variable sized stack objects.
  return frameTriviallyRequiresSP(MFI);
}