//===----------------------- SIFrameLowering.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "frame-info"

// Find a scratch register that we can use in the prologue. We avoid using
// callee-save registers since they may appear to be free when this is called
// from canUseAsPrologue (during shrink wrapping), but then no longer be free
// when this is called from emitPrologue.
static MCRegister findScratchNonCalleeSaveRegister(MachineRegisterInfo &MRI,
                                                   LivePhysRegs &LiveRegs,
                                                   const TargetRegisterClass &RC,
                                                   bool Unused = false) {
  // Mark callee saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

  if (Unused) {
    // We are looking for a register that can be used throughout the entire
    // function, so any use is unacceptable.
    for (MCRegister Reg : RC) {
      if (!MRI.isPhysRegUsed(Reg) && LiveRegs.available(MRI, Reg))
        return Reg;
    }
  } else {
    for (MCRegister Reg : RC) {
      if (LiveRegs.available(MRI, Reg))
        return Reg;
    }
  }

  return MCRegister();
}

static void getVGPRSpillLaneOrTempRegister(MachineFunction &MF,
                                           LivePhysRegs &LiveRegs,
                                           Register &TempSGPR,
                                           Optional<int> &FrameIndex,
                                           bool IsFP) {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  // We need to save and restore the current FP/BP.

  // 1: If there is already a VGPR with free lanes, use it. We
  // may already have to pay the penalty for spilling a CSR VGPR.
  if (MFI->haveFreeLanesForSGPRSpill(MF, 1)) {
    int NewFI = FrameInfo.CreateStackObject(4, Align(4), true, nullptr,
                                            TargetStackID::SGPRSpill);

    if (!MFI->allocateSGPRSpillToVGPR(MF, NewFI))
      llvm_unreachable("allocate SGPR spill should have worked");

    FrameIndex = NewFI;

    LLVM_DEBUG(auto Spill = MFI->getSGPRToVGPRSpills(NewFI).front();
               dbgs() << "Spilling " << (IsFP ? "FP" : "BP") << " to "
                      << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane
                      << '\n');
    return;
  }

  // 2: Next, try to save the FP/BP in an unused SGPR.
  TempSGPR = findScratchNonCalleeSaveRegister(
      MF.getRegInfo(), LiveRegs, AMDGPU::SReg_32_XM0_XEXECRegClass, true);

  if (!TempSGPR) {
    int NewFI = FrameInfo.CreateStackObject(4, Align(4), true, nullptr,
                                            TargetStackID::SGPRSpill);

    if (TRI->spillSGPRToVGPR() && MFI->allocateSGPRSpillToVGPR(MF, NewFI)) {
      // 3: There's no free lane to spill, and no free register to save FP/BP,
      // so we're forced to spill another VGPR to use for the spill.
      FrameIndex = NewFI;

      LLVM_DEBUG(
          auto Spill = MFI->getSGPRToVGPRSpills(NewFI).front();
          dbgs() << (IsFP ? "FP" : "BP") << " requires fallback spill to "
                 << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane << '\n';);
    } else {
      // Remove dead <NewFI> index
      MF.getFrameInfo().RemoveStackObject(NewFI);
      // 4: If all else fails, spill the FP/BP to memory.
      FrameIndex = FrameInfo.CreateSpillStackObject(4, Align(4));
      LLVM_DEBUG(dbgs() << "Reserved FI " << FrameIndex << " for spilling "
                        << (IsFP ? "FP" : "BP") << '\n');
    }
  } else {
    LLVM_DEBUG(dbgs() << "Saving " << (IsFP ? "FP" : "BP") << " with copy to "
                      << printReg(TempSGPR, TRI) << '\n');
  }
}

// We need to emit the stack operations specially here because the prologue may
// use a different frame register than getFrameRegister would return for the
// rest of the function.
static void buildPrologSpill(const GCNSubtarget &ST, LivePhysRegs &LiveRegs,
                             MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I,
                             const SIInstrInfo *TII, Register SpillReg,
                             Register ScratchRsrcReg, Register SPReg, int FI) {
  MachineFunction *MF = MBB.getParent();
  MachineFrameInfo &MFI = MF->getFrameInfo();

  int64_t Offset = MFI.getObjectOffset(FI);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore, 4,
      MFI.getObjectAlign(FI));

  if (ST.enableFlatScratch()) {
    if (TII->isLegalFLATOffset(Offset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::SCRATCH_STORE_DWORD_SADDR))
          .addReg(SpillReg, RegState::Kill)
          .addReg(SPReg)
          .addImm(Offset)
          .addImm(0) // cpol
          .addMemOperand(MMO);
      return;
    }
  } else if (SIInstrInfo::isLegalMUBUFImmOffset(Offset)) {
    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::BUFFER_STORE_DWORD_OFFSET))
        .addReg(SpillReg, RegState::Kill)
        .addReg(ScratchRsrcReg)
        .addReg(SPReg)
        .addImm(Offset)
        .addImm(0) // cpol
        .addImm(0) // tfe
        .addImm(0) // swz
        .addMemOperand(MMO);
    return;
  }

  // Don't clobber the TmpVGPR if we also need a scratch reg for the stack
  // offset in the spill.
  LiveRegs.addReg(SpillReg);

  if (ST.enableFlatScratch()) {
    MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister(
        MF->getRegInfo(), LiveRegs, AMDGPU::SReg_32_XM0RegClass);

    bool HasOffsetReg = OffsetReg;
    if (!HasOffsetReg) {
      // No free register, use stack pointer and restore afterwards.
      OffsetReg = SPReg;
    }

    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_ADD_U32), OffsetReg)
        .addReg(SPReg)
        .addImm(Offset);

    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::SCRATCH_STORE_DWORD_SADDR))
        .addReg(SpillReg, RegState::Kill)
        .addReg(OffsetReg, HasOffsetReg ?
                               RegState::Kill : 0)
        .addImm(0) // offset
        .addImm(0) // cpol
        .addMemOperand(MMO);

    if (!HasOffsetReg) {
      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_SUB_U32), OffsetReg)
          .addReg(SPReg)
          .addImm(Offset);
    }
  } else {
    MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister(
        MF->getRegInfo(), LiveRegs, AMDGPU::VGPR_32RegClass);

    if (OffsetReg) {
      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::V_MOV_B32_e32), OffsetReg)
          .addImm(Offset);

      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::BUFFER_STORE_DWORD_OFFEN))
          .addReg(SpillReg, RegState::Kill)
          .addReg(OffsetReg, RegState::Kill)
          .addReg(ScratchRsrcReg)
          .addReg(SPReg)
          .addImm(0) // offset
          .addImm(0) // cpol
          .addImm(0) // tfe
          .addImm(0) // swz
          .addMemOperand(MMO);
    } else {
      // No free register, use stack pointer and restore afterwards.
      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_ADD_U32), SPReg)
          .addReg(SPReg)
          .addImm(Offset);

      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::BUFFER_STORE_DWORD_OFFSET))
          .addReg(SpillReg, RegState::Kill)
          .addReg(ScratchRsrcReg)
          .addReg(SPReg)
          .addImm(0) // offset
          .addImm(0) // cpol
          .addImm(0) // tfe
          .addImm(0) // swz
          .addMemOperand(MMO);

      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_SUB_U32), SPReg)
          .addReg(SPReg)
          .addImm(Offset);
    }
  }

  LiveRegs.removeReg(SpillReg);
}

static void buildEpilogReload(const GCNSubtarget &ST, LivePhysRegs &LiveRegs,
                              MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I,
                              const SIInstrInfo *TII, Register SpillReg,
                              Register ScratchRsrcReg, Register SPReg, int FI) {
  MachineFunction *MF = MBB.getParent();
  MachineFrameInfo &MFI = MF->getFrameInfo();
  int64_t Offset = MFI.getObjectOffset(FI);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad, 4,
      MFI.getObjectAlign(FI));

  if (ST.enableFlatScratch()) {
    if (TII->isLegalFLATOffset(Offset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
      BuildMI(MBB, I, DebugLoc(),
              TII->get(AMDGPU::SCRATCH_LOAD_DWORD_SADDR), SpillReg)
          .addReg(SPReg)
          .addImm(Offset)
          .addImm(0) // cpol
          .addMemOperand(MMO);
      return;
    }
    MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister(
        MF->getRegInfo(), LiveRegs, AMDGPU::SReg_32_XM0RegClass);
    if (!OffsetReg)
      report_fatal_error("failed to find free scratch register");

    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_ADD_U32), OffsetReg)
        .addReg(SPReg)
        .addImm(Offset);
    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::SCRATCH_LOAD_DWORD_SADDR),
            SpillReg)
        .addReg(OffsetReg, RegState::Kill)
        .addImm(0)
        .addImm(0) // cpol
        .addMemOperand(MMO);
    return;
  }

  if (SIInstrInfo::isLegalMUBUFImmOffset(Offset)) {
    BuildMI(MBB, I, DebugLoc(),
            TII->get(AMDGPU::BUFFER_LOAD_DWORD_OFFSET), SpillReg)
        .addReg(ScratchRsrcReg)
        .addReg(SPReg)
        .addImm(Offset)
        .addImm(0) // cpol
        .addImm(0) // tfe
        .addImm(0) // swz
        .addMemOperand(MMO);
    return;
  }

  MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister(
      MF->getRegInfo(), LiveRegs, AMDGPU::VGPR_32RegClass);
  if (!OffsetReg)
    report_fatal_error("failed to find free scratch register");

  BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::V_MOV_B32_e32), OffsetReg)
      .addImm(Offset);

  BuildMI(MBB, I, DebugLoc(),
          TII->get(AMDGPU::BUFFER_LOAD_DWORD_OFFEN), SpillReg)
      .addReg(OffsetReg, RegState::Kill)
      .addReg(ScratchRsrcReg)
      .addReg(SPReg)
      .addImm(0)
      .addImm(0) // cpol
      .addImm(0) // tfe
      .addImm(0) // swz
      .addMemOperand(MMO);
}

static void buildGitPtr(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, const SIInstrInfo *TII,
                        Register TargetReg) {
  MachineFunction *MF = MBB.getParent();
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);
  Register TargetLo = TRI->getSubReg(TargetReg, AMDGPU::sub0);
  Register TargetHi = TRI->getSubReg(TargetReg, AMDGPU::sub1);

  if (MFI->getGITPtrHigh() != 0xffffffff) {
    BuildMI(MBB, I, DL, SMovB32, TargetHi)
        .addImm(MFI->getGITPtrHigh())
        .addReg(TargetReg, RegState::ImplicitDefine);
  } else {
    const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64);
    BuildMI(MBB, I, DL, GetPC64, TargetReg);
  }
  Register GitPtrLo = MFI->getGITPtrLoReg(*MF);
  MF->getRegInfo().addLiveIn(GitPtrLo);
  MBB.addLiveIn(GitPtrLo);
  BuildMI(MBB, I, DL, SMovB32, TargetLo)
      .addReg(GitPtrLo);
}

// Emit flat scratch setup code, assuming `MFI->hasFlatScratchInit()`
void SIFrameLowering::emitEntryFunctionFlatScratchInit(
    MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    const DebugLoc &DL, Register ScratchWaveOffsetReg) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // We don't need this if we only have spills since there is no user facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.

  Register FlatScrInitLo;
  Register FlatScrInitHi;

  if (ST.isAmdPalOS()) {
    // Extract the scratch offset from the descriptor in the GIT
    LivePhysRegs LiveRegs;
    LiveRegs.init(*TRI);
    LiveRegs.addLiveIns(MBB);

    // Find unused reg to load flat scratch init into
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register FlatScrInit = AMDGPU::NoRegister;
    ArrayRef<MCPhysReg> AllSGPR64s = TRI->getAllSGPR64(MF);
    unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 1) / 2;
    AllSGPR64s = AllSGPR64s.slice(
        std::min(static_cast<unsigned>(AllSGPR64s.size()), NumPreloaded));
    Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
    for (MCPhysReg Reg : AllSGPR64s) {
      if (LiveRegs.available(MRI, Reg) && MRI.isAllocatable(Reg) &&
          !TRI->isSubRegisterEq(Reg, GITPtrLoReg)) {
        FlatScrInit = Reg;
        break;
      }
    }
    assert(FlatScrInit && "Failed to find free register for scratch init");

    FlatScrInitLo = TRI->getSubReg(FlatScrInit, AMDGPU::sub0);
    FlatScrInitHi = TRI->getSubReg(FlatScrInit, AMDGPU::sub1);

    buildGitPtr(MBB, I, DL, TII, FlatScrInit);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
    const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);
    auto *MMO = MF.getMachineMemOperand(
        PtrInfo,
        MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
            MachineMemOperand::MODereferenceable,
        8, Align(4));
    unsigned Offset =
        MF.getFunction().getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
    unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset);
    BuildMI(MBB, I, DL, LoadDwordX2, FlatScrInit)
        .addReg(FlatScrInit)
        .addImm(EncodedOffset) // offset
        .addImm(0)             // cpol
        .addMemOperand(MMO);

    // Mask the offset in [47:0] of the descriptor
    const MCInstrDesc &SAndB32 = TII->get(AMDGPU::S_AND_B32);
    BuildMI(MBB, I, DL, SAndB32, FlatScrInitHi)
        .addReg(FlatScrInitHi)
        .addImm(0xffff);
  } else {
    Register FlatScratchInitReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);
    assert(FlatScratchInitReg);

    MachineRegisterInfo &MRI = MF.getRegInfo();
    MRI.addLiveIn(FlatScratchInitReg);
    MBB.addLiveIn(FlatScratchInitReg);

    FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
    FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);
  }

  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
          .addReg(FlatScrInitLo)
          .addReg(ScratchWaveOffsetReg);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), FlatScrInitHi)
          .addReg(FlatScrInitHi)
          .addImm(0);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
          .addReg(FlatScrInitLo)
          .addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_LO |
                          (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
          .addReg(FlatScrInitHi)
          .addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_HI |
                          (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
      return;
    }

    // For GFX9.
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
        .addReg(FlatScrInitLo)
        .addReg(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
        .addReg(FlatScrInitHi)
        .addImm(0);

    return;
  }

  assert(ST.getGeneration() < AMDGPUSubtarget::GFX9);

  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitHi, RegState::Kill);

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
      .addReg(FlatScrInitLo, RegState::Kill)
      .addImm(8);
}

// Note SGPRSpill stack IDs should only be used for SGPR spilling to VGPRs, not
// memory. They should have been removed by now.
static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I))
      return false;
  }

  return true;
}

// Shift down registers reserved for the scratch RSRC.
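// (The reserved SRSRC is moved down to the first unused 128-bit SGPR tuple
// after the preloaded SGPRs; see the comments in the body below.)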
Register SIFrameLowering::getEntryFunctionReservedScratchRsrcReg(
    MachineFunction &MF) const {

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  assert(MFI->isEntryFunction());

  Register ScratchRsrcReg = MFI->getScratchRSrcReg();

  if (!ScratchRsrcReg || (!MRI.isPhysRegUsed(ScratchRsrcReg) &&
                          allStackObjectsAreDead(MF.getFrameInfo())))
    return Register();

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift it down to the end of those
  // which were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. We only
  // cannot do this for the resources required for scratch access. For now we
  // skip over user SGPRs and may leave unused holes.

  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = TRI->getAllSGPR128(MF);
  AllSGPR128s = AllSGPR128s.slice(
      std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed. Also for PAL, make sure we don't clobber
    // the GIT pointer passed in SGPR0 or SGPR8.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) &&
        !TRI->isSubRegisterEq(Reg, GITPtrLoReg)) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

static unsigned getScratchScaleFactor(const GCNSubtarget &ST) {
  return ST.enableFlatScratch() ? 1 : ST.getWavefrontSize();
}

void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
                                                MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  // FIXME: If we only have SGPR spills, we won't actually be using scratch
  // memory since these spill to VGPRs. We should be cleaning up these unused
  // SGPR spill frame indices somewhere.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.

  // FIXME: Remove all of the isPhysRegUsed checks

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();

  assert(MFI->isEntryFunction());

  Register PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
      AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
  // FIXME: Hack to not crash in situations which emitted an error.
  if (!PreloadedScratchWaveOffsetReg)
    return;

  // We need to do the replacement of the private segment buffer register even
  // if there are no stack objects. There could be stores to undef or a
  // constant without an associated object.
  //
  // This will return `Register()` in cases where there are no actual
  // uses of the SRSRC.
  Register ScratchRsrcReg;
  if (!ST.enableFlatScratch())
    ScratchRsrcReg = getEntryFunctionReservedScratchRsrcReg(MF);

  // Make the selected register live throughout the function.
  if (ScratchRsrcReg) {
    for (MachineBasicBlock &OtherBB : MF) {
      if (&OtherBB != &MBB) {
        OtherBB.addLiveIn(ScratchRsrcReg);
      }
    }
  }

  // Now that we have fixed the reserved SRSRC we need to locate the
  // (potentially) preloaded SRSRC.
  Register PreloadedScratchRsrcReg;
  if (ST.isAmdHsaOrMesa(F)) {
    PreloadedScratchRsrcReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
    if (ScratchRsrcReg && PreloadedScratchRsrcReg) {
      // We added live-ins during argument lowering, but since they were not
      // used they were deleted. We're adding the uses now, so add them back.
      MRI.addLiveIn(PreloadedScratchRsrcReg);
      MBB.addLiveIn(PreloadedScratchRsrcReg);
    }
  }

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // We found the SRSRC first because it needs four registers and has an
  // alignment requirement. If the SRSRC we found clobbers the scratch wave
  // offset, which may be in a fixed SGPR or a free SGPR chosen by
  // SITargetLowering::allocateSystemSGPRs, COPY the scratch wave offset to a
  // free SGPR.
  Register ScratchWaveOffsetReg;
  if (TRI->isSubRegisterEq(ScratchRsrcReg, PreloadedScratchWaveOffsetReg)) {
    ArrayRef<MCPhysReg> AllSGPRs = TRI->getAllSGPR32(MF);
    unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();
    AllSGPRs = AllSGPRs.slice(
        std::min(static_cast<unsigned>(AllSGPRs.size()), NumPreloaded));
    Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
    for (MCPhysReg Reg : AllSGPRs) {
      if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) &&
          !TRI->isSubRegisterEq(ScratchRsrcReg, Reg) && GITPtrLoReg != Reg) {
        ScratchWaveOffsetReg = Reg;
        BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
            .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill);
        break;
      }
    }
  } else {
    ScratchWaveOffsetReg = PreloadedScratchWaveOffsetReg;
  }
  assert(ScratchWaveOffsetReg);

  if (requiresStackPointerReference(MF)) {
    Register SPReg = MFI->getStackPtrOffsetReg();
    assert(SPReg != AMDGPU::SP_REG);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), SPReg)
        .addImm(MF.getFrameInfo().getStackSize() * getScratchScaleFactor(ST));
  }

  if (hasFP(MF)) {
    Register FPReg = MFI->getFrameOffsetReg();
    assert(FPReg != AMDGPU::FP_REG);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), FPReg).addImm(0);
  }

  if (MFI->hasFlatScratchInit() || ScratchRsrcReg) {
    MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
    MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
  }

  if (MFI->hasFlatScratchInit()) {
    emitEntryFunctionFlatScratchInit(MF, MBB, I, DL, ScratchWaveOffsetReg);
  }

  if (ScratchRsrcReg) {
    emitEntryFunctionScratchRsrcRegSetup(MF, MBB, I, DL,
                                         PreloadedScratchRsrcReg,
                                         ScratchRsrcReg, ScratchWaveOffsetReg);
  }
}

// Emit scratch RSRC setup code, assuming `ScratchRsrcReg != AMDGPU::NoReg`
void SIFrameLowering::emitEntryFunctionScratchRsrcRegSetup(
    MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    const DebugLoc &DL, Register PreloadedScratchRsrcReg,
    Register ScratchRsrcReg, Register ScratchWaveOffsetReg) const {

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const Function &Fn = MF.getFunction();

  if (ST.isAmdPalOS()) {
    // The pointer to the GIT is formed from the offset passed in and either
    // the amdgpu-git-ptr-high function attribute or the top part of the PC
    Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

    buildGitPtr(MBB, I, DL, TII, Rsrc01);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
    const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
    auto MMO = MF.getMachineMemOperand(PtrInfo,
                                       MachineMemOperand::MOLoad |
                                           MachineMemOperand::MOInvariant |
                                           MachineMemOperand::MODereferenceable,
                                       16, Align(4));
    unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ?
                          16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
    unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset);
    BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg)
        .addReg(Rsrc01)
        .addImm(EncodedOffset) // offset
        .addImm(0)             // cpol
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine)
        .addMemOperand(MMO);
  } else if (ST.isMesaGfxShader(Fn) || !PreloadedScratchRsrcReg) {
    assert(!ST.isAmdHsaOrMesa(Fn));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    Register Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    Register Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and setup the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();

    if (MFI->hasImplicitBufferPtr()) {
      Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
            .addReg(MFI->getImplicitBufferPtrUserSGPR())
            .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
        auto MMO = MF.getMachineMemOperand(
            PtrInfo,
            MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
                MachineMemOperand::MODereferenceable,
            8, Align(4));
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
            .addReg(MFI->getImplicitBufferPtrUserSGPR())
            .addImm(0) // offset
            .addImm(0) // cpol
            .addMemOperand(MMO)
            .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

        MF.getRegInfo().addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
        MBB.addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
      }
    } else {
      Register Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      Register Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
          .addExternalSymbol("SCRATCH_RSRC_DWORD0")
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
          .addExternalSymbol("SCRATCH_RSRC_DWORD1")
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
        .addImm(Rsrc23 & 0xffffffff)
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
        .addImm(Rsrc23 >> 32)
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  } else if (ST.isAmdHsaOrMesa(Fn)) {
    assert(PreloadedScratchRsrcReg);

    if (ScratchRsrcReg != PreloadedScratchRsrcReg) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
          .addReg(PreloadedScratchRsrcReg, RegState::Kill);
    }
  }

  // Add the scratch wave offset into the scratch RSRC.
  //
  // We only want to update the first 48 bits, which is the base address
  // pointer, without touching the adjacent 16 bits of flags. We know this add
  // cannot carry-out from bit 47, otherwise the scratch allocation would be
  // impossible to fit in the 48-bit global address space.
  //
  // TODO: Evaluate if it is better to just construct an SRD using the flat
  // scratch init and some constants rather than update the one we are passed.
  Register ScratchRsrcSub0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
  Register ScratchRsrcSub1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

  // We cannot Kill ScratchWaveOffsetReg here because we allow it to be used in
  // the kernel body via inreg arguments.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), ScratchRsrcSub0)
      .addReg(ScratchRsrcSub0)
      .addReg(ScratchWaveOffsetReg)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), ScratchRsrcSub1)
      .addReg(ScratchRsrcSub1)
      .addImm(0)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
}

bool SIFrameLowering::isSupportedStackID(TargetStackID::Value ID) const {
  switch (ID) {
  case TargetStackID::Default:
  case TargetStackID::NoAlloc:
  case TargetStackID::SGPRSpill:
    return true;
  case TargetStackID::ScalableVector:
    return false;
  }
  llvm_unreachable("Invalid TargetStackID::Value");
}

// Activate all lanes, returns saved exec.
static Register buildScratchExecCopy(LivePhysRegs &LiveRegs,
                                     MachineFunction &MF,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     bool IsProlog) {
  Register ScratchExecCopy;
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  DebugLoc DL;

  if (LiveRegs.empty()) {
    if (IsProlog) {
      LiveRegs.init(TRI);
      LiveRegs.addLiveIns(MBB);
      if (FuncInfo->SGPRForFPSaveRestoreCopy)
        LiveRegs.removeReg(FuncInfo->SGPRForFPSaveRestoreCopy);

      if (FuncInfo->SGPRForBPSaveRestoreCopy)
        LiveRegs.removeReg(FuncInfo->SGPRForBPSaveRestoreCopy);
    } else {
      // In epilog.
      LiveRegs.init(*ST.getRegisterInfo());
      LiveRegs.addLiveOuts(MBB);
      LiveRegs.stepBackward(*MBBI);
    }
  }

  ScratchExecCopy = findScratchNonCalleeSaveRegister(
      MRI, LiveRegs, *TRI.getWaveMaskRegClass());
  if (!ScratchExecCopy)
    report_fatal_error("failed to find free scratch register");

  if (!IsProlog)
    LiveRegs.removeReg(ScratchExecCopy);

  const unsigned OrSaveExec =
      ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64;
  BuildMI(MBB, MBBI, DL, TII->get(OrSaveExec), ScratchExecCopy).addImm(-1);

  return ScratchExecCopy;
}

// A StackID of SGPRSpill implies that this is a spill from SGPR to VGPR.
// Otherwise we are spilling to memory.
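// The FP/BP save index set up in determineCalleeSaves may therefore refer
// either to a VGPR lane or to a real scratch stack slot.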
static bool spilledToMemory(const MachineFunction &MF, int SaveIndex) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return MFI.getStackID(SaveIndex) != TargetStackID::SGPRSpill;
}

void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction()) {
    emitEntryFunctionPrologue(MF, MBB);
    return;
  }

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  Register StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  Register FramePtrReg = FuncInfo->getFrameOffsetReg();
  Register BasePtrReg =
      TRI.hasBasePointer(MF) ? TRI.getBaseRegister() : Register();
  LivePhysRegs LiveRegs;

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL;

  bool HasFP = false;
  bool HasBP = false;
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = NumBytes;
  // To avoid clobbering VGPRs in lanes that weren't active on function entry,
  // turn on all lanes before doing the spill to memory.
  Register ScratchExecCopy;

  Optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex;
  Optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex;

  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
       : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;

    if (!ScratchExecCopy)
      ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, true);

    buildPrologSpill(ST, LiveRegs, MBB, MBBI, TII, Reg.VGPR,
                     FuncInfo->getScratchRSrcReg(),
                     StackPtrReg,
                     Reg.FI.getValue());
  }

  if (FPSaveIndex && spilledToMemory(MF, *FPSaveIndex)) {
    const int FramePtrFI = *FPSaveIndex;
    assert(!MFI.isDeadObjectIndex(FramePtrFI));

    if (!ScratchExecCopy)
      ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, true);

    MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister(
        MRI, LiveRegs, AMDGPU::VGPR_32RegClass);
    if (!TmpVGPR)
      report_fatal_error("failed to find free scratch register");

    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
        .addReg(FramePtrReg);

    buildPrologSpill(ST, LiveRegs, MBB, MBBI, TII, TmpVGPR,
                     FuncInfo->getScratchRSrcReg(), StackPtrReg, FramePtrFI);
  }

  if (BPSaveIndex && spilledToMemory(MF, *BPSaveIndex)) {
    const int BasePtrFI = *BPSaveIndex;
    assert(!MFI.isDeadObjectIndex(BasePtrFI));

    if (!ScratchExecCopy)
      ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, true);

    MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister(
        MRI, LiveRegs, AMDGPU::VGPR_32RegClass);
    if (!TmpVGPR)
      report_fatal_error("failed to find free scratch register");

    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
        .addReg(BasePtrReg);

    buildPrologSpill(ST, LiveRegs, MBB, MBBI, TII, TmpVGPR,
                     FuncInfo->getScratchRSrcReg(), StackPtrReg, BasePtrFI);
  }

  if (ScratchExecCopy) {
    // FIXME: Split block and make terminator.
    unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    MCRegister Exec = ST.isWave32() ?
                          AMDGPU::EXEC_LO : AMDGPU::EXEC;
    BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec)
        .addReg(ScratchExecCopy, RegState::Kill);
    LiveRegs.addReg(ScratchExecCopy);
  }

  // In this case, spill the FP to a reserved VGPR.
  if (FPSaveIndex && !spilledToMemory(MF, *FPSaveIndex)) {
    const int FramePtrFI = *FPSaveIndex;
    assert(!MFI.isDeadObjectIndex(FramePtrFI));

    assert(MFI.getStackID(FramePtrFI) == TargetStackID::SGPRSpill);
    ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill =
        FuncInfo->getSGPRToVGPRSpills(FramePtrFI);
    assert(Spill.size() == 1);

    // Save FP before setting it up.
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill[0].VGPR)
        .addReg(FramePtrReg)
        .addImm(Spill[0].Lane)
        .addReg(Spill[0].VGPR, RegState::Undef);
  }

  // In this case, spill the BP to a reserved VGPR.
  if (BPSaveIndex && !spilledToMemory(MF, *BPSaveIndex)) {
    const int BasePtrFI = *BPSaveIndex;
    assert(!MFI.isDeadObjectIndex(BasePtrFI));

    assert(MFI.getStackID(BasePtrFI) == TargetStackID::SGPRSpill);
    ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill =
        FuncInfo->getSGPRToVGPRSpills(BasePtrFI);
    assert(Spill.size() == 1);

    // Save BP before setting it up.
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill[0].VGPR)
        .addReg(BasePtrReg)
        .addImm(Spill[0].Lane)
        .addReg(Spill[0].VGPR, RegState::Undef);
  }

  // Emit the copy if we need an FP, and are using a free SGPR to save it.
  if (FuncInfo->SGPRForFPSaveRestoreCopy) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY),
            FuncInfo->SGPRForFPSaveRestoreCopy)
        .addReg(FramePtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Emit the copy if we need a BP, and are using a free SGPR to save it.
  if (FuncInfo->SGPRForBPSaveRestoreCopy) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY),
            FuncInfo->SGPRForBPSaveRestoreCopy)
        .addReg(BasePtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // If a copy has been emitted for FP and/or BP, make the SGPRs
  // used in the copy instructions live throughout the function.
  SmallVector<MCPhysReg, 2> TempSGPRs;
  if (FuncInfo->SGPRForFPSaveRestoreCopy)
    TempSGPRs.push_back(FuncInfo->SGPRForFPSaveRestoreCopy);

  if (FuncInfo->SGPRForBPSaveRestoreCopy)
    TempSGPRs.push_back(FuncInfo->SGPRForBPSaveRestoreCopy);

  if (!TempSGPRs.empty()) {
    for (MachineBasicBlock &MBB : MF) {
      for (MCPhysReg Reg : TempSGPRs)
        MBB.addLiveIn(Reg);

      MBB.sortUniqueLiveIns();
    }
    if (!LiveRegs.empty()) {
      LiveRegs.addReg(FuncInfo->SGPRForFPSaveRestoreCopy);
      LiveRegs.addReg(FuncInfo->SGPRForBPSaveRestoreCopy);
    }
  }

  if (TRI.needsStackRealignment(MF)) {
    HasFP = true;
    const unsigned Alignment = MFI.getMaxAlign().value();

    RoundedSize += Alignment;
    if (LiveRegs.empty()) {
      LiveRegs.init(TRI);
      LiveRegs.addLiveIns(MBB);
    }

    // s_add_u32 s33, s32, NumBytes
    // s_and_b32 s33, s33, 0b111...0000
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), FramePtrReg)
        .addReg(StackPtrReg)
        .addImm((Alignment - 1) * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_AND_B32), FramePtrReg)
        .addReg(FramePtrReg, RegState::Kill)
        .addImm(-Alignment * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
    FuncInfo->setIsStackRealigned(true);
  } else if ((HasFP = hasFP(MF))) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
        .addReg(StackPtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // If we need a base pointer, set it up here. It's whatever the value of
  // the stack pointer is at this point. Any variable size objects will be
  // allocated after this, so we can still use the base pointer to reference
  // the incoming arguments.
  if ((HasBP = TRI.hasBasePointer(MF))) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), BasePtrReg)
        .addReg(StackPtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  if (HasFP && RoundedSize != 0) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), StackPtrReg)
        .addReg(StackPtrReg)
        .addImm(RoundedSize * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
  }

  assert((!HasFP || (FuncInfo->SGPRForFPSaveRestoreCopy ||
                     FuncInfo->FramePointerSaveIndex)) &&
         "Needed to save FP but didn't save it anywhere");

  assert((HasFP || (!FuncInfo->SGPRForFPSaveRestoreCopy &&
                    !FuncInfo->FramePointerSaveIndex)) &&
         "Saved FP but didn't need it");

  assert((!HasBP || (FuncInfo->SGPRForBPSaveRestoreCopy ||
                     FuncInfo->BasePointerSaveIndex)) &&
         "Needed to save BP but didn't save it anywhere");

  assert((HasBP || (!FuncInfo->SGPRForBPSaveRestoreCopy &&
                    !FuncInfo->BasePointerSaveIndex)) &&
         "Saved BP but didn't need it");
}

void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  LivePhysRegs LiveRegs;
  DebugLoc DL;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = FuncInfo->isStackRealigned()
                             ? NumBytes + MFI.getMaxAlign().value()
                             : NumBytes;
  const Register StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  const Register FramePtrReg = FuncInfo->getFrameOffsetReg();
  const Register BasePtrReg =
      TRI.hasBasePointer(MF) ?
                          TRI.getBaseRegister() : Register();

  Optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex;
  Optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex;

  if (RoundedSize != 0 && hasFP(MF)) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_SUB_U32), StackPtrReg)
        .addReg(StackPtrReg)
        .addImm(RoundedSize * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameDestroy);
  }

  if (FuncInfo->SGPRForFPSaveRestoreCopy) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
        .addReg(FuncInfo->SGPRForFPSaveRestoreCopy)
        .setMIFlag(MachineInstr::FrameDestroy);
  }

  if (FuncInfo->SGPRForBPSaveRestoreCopy) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), BasePtrReg)
        .addReg(FuncInfo->SGPRForBPSaveRestoreCopy)
        .setMIFlag(MachineInstr::FrameDestroy);
  }

  Register ScratchExecCopy;
  if (FPSaveIndex) {
    const int FramePtrFI = *FPSaveIndex;
    assert(!MFI.isDeadObjectIndex(FramePtrFI));
    if (spilledToMemory(MF, FramePtrFI)) {
      if (!ScratchExecCopy)
        ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, false);

      MCPhysReg TempVGPR = findScratchNonCalleeSaveRegister(
          MRI, LiveRegs, AMDGPU::VGPR_32RegClass);
      if (!TempVGPR)
        report_fatal_error("failed to find free scratch register");
      buildEpilogReload(ST, LiveRegs, MBB, MBBI, TII, TempVGPR,
                        FuncInfo->getScratchRSrcReg(), StackPtrReg, FramePtrFI);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), FramePtrReg)
          .addReg(TempVGPR, RegState::Kill);
    } else {
      // Reload from VGPR spill.
      assert(MFI.getStackID(FramePtrFI) == TargetStackID::SGPRSpill);
      ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill =
          FuncInfo->getSGPRToVGPRSpills(FramePtrFI);
      assert(Spill.size() == 1);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READLANE_B32), FramePtrReg)
          .addReg(Spill[0].VGPR)
          .addImm(Spill[0].Lane);
    }
  }

  if (BPSaveIndex) {
    const int BasePtrFI = *BPSaveIndex;
    assert(!MFI.isDeadObjectIndex(BasePtrFI));
    if (spilledToMemory(MF, BasePtrFI)) {
      if (!ScratchExecCopy)
        ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, false);

      MCPhysReg TempVGPR = findScratchNonCalleeSaveRegister(
          MRI, LiveRegs, AMDGPU::VGPR_32RegClass);
      if (!TempVGPR)
        report_fatal_error("failed to find free scratch register");
      buildEpilogReload(ST, LiveRegs, MBB, MBBI, TII, TempVGPR,
                        FuncInfo->getScratchRSrcReg(), StackPtrReg, BasePtrFI);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), BasePtrReg)
          .addReg(TempVGPR, RegState::Kill);
    } else {
      // Reload from VGPR spill.
      assert(MFI.getStackID(BasePtrFI) == TargetStackID::SGPRSpill);
      ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill =
          FuncInfo->getSGPRToVGPRSpills(BasePtrFI);
      assert(Spill.size() == 1);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READLANE_B32), BasePtrReg)
          .addReg(Spill[0].VGPR)
          .addImm(Spill[0].Lane);
    }
  }

  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg :
       FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;

    if (!ScratchExecCopy)
      ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, false);

    buildEpilogReload(ST, LiveRegs, MBB, MBBI, TII, Reg.VGPR,
                      FuncInfo->getScratchRSrcReg(), StackPtrReg,
                      Reg.FI.getValue());
  }

  if (ScratchExecCopy) {
    // FIXME: Split block and make terminator.
    unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    MCRegister Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec)
        .addReg(ScratchExecCopy, RegState::Kill);
  }
}

#ifndef NDEBUG
static bool allSGPRSpillsAreDead(const MachineFunction &MF) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I) &&
        MFI.getStackID(I) == TargetStackID::SGPRSpill &&
        (I != FuncInfo->FramePointerSaveIndex &&
         I != FuncInfo->BasePointerSaveIndex)) {
      return false;
    }
  }

  return true;
}
#endif

StackOffset SIFrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                    int FI,
                                                    Register &FrameReg) const {
  const SIRegisterInfo *RI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();

  FrameReg = RI->getFrameRegister(MF);
  return StackOffset::getFixed(MF.getFrameInfo().getObjectOffset(FI));
}

void SIFrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF,
    RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();

  FuncInfo->removeDeadFrameIndices(MFI);
  assert(allSGPRSpillsAreDead(MF) &&
         "SGPR spill should have been removed in SILowerSGPRSpills");

  // FIXME: The other checks should be redundant with allStackObjectsAreDead,
  // but currently hasNonSpillStackObjects is set only from source
  // allocas. Stack temps produced from legalization are not counted currently.
  if (!allStackObjectsAreDead(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    if (FuncInfo->isEntryFunction()) {
      int ScavengeFI = MFI.CreateFixedObject(
          TRI->getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
      RS->addScavengingFrameIndex(ScavengeFI);
    } else {
      int ScavengeFI = MFI.CreateStackObject(
          TRI->getSpillSize(AMDGPU::SGPR_32RegClass),
          TRI->getSpillAlign(AMDGPU::SGPR_32RegClass), false);
      RS->addScavengingFrameIndex(ScavengeFI);
    }
  }
}

// Only report VGPRs to generic code.
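// SGPR callee saves are handled separately in determineCalleeSavesSGPR below.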
void SIFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                           BitVector &SavedVGPRs,
                                           RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedVGPRs, RS);
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  if (MFI->isEntryFunction())
    return;

  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  // Ignore the SGPRs the default implementation found.
  SavedVGPRs.clearBitsNotInMask(TRI->getAllVectorRegMask());

  // Do not save AGPRs prior to GFX90A because there was no easy way to do so.
  // On gfx908 there are no AGPR loads and stores, so spilling an AGPR also
  // requires a temporary VGPR.
  if (!ST.hasGFX90AInsts())
    SavedVGPRs.clearBitsInMask(TRI->getAllAGPRRegMask());

  // hasFP only knows about stack objects that already exist. We're now
  // determining the stack slots that will be created, so we have to predict
  // them. Stack objects force FP usage with calls.
  //
  // Note a new VGPR CSR may be introduced if one is used for the spill, but we
  // don't want to report it here.
  //
  // FIXME: Is this really hasReservedCallFrame?
  const bool WillHaveFP =
      FrameInfo.hasCalls() &&
      (SavedVGPRs.any() || !allStackObjectsAreDead(FrameInfo));

  // VGPRs used for SGPR spilling need to be specially inserted in the prolog,
  // so don't allow the default insertion to handle them.
  for (auto SSpill : MFI->getSGPRSpillVGPRs())
    SavedVGPRs.reset(SSpill.VGPR);

  LivePhysRegs LiveRegs;
  LiveRegs.init(*TRI);

  if (WillHaveFP || hasFP(MF)) {
    assert(!MFI->SGPRForFPSaveRestoreCopy && !MFI->FramePointerSaveIndex &&
           "Re-reserving spill slot for FP");
    getVGPRSpillLaneOrTempRegister(MF, LiveRegs, MFI->SGPRForFPSaveRestoreCopy,
                                   MFI->FramePointerSaveIndex, true);
  }

  if (TRI->hasBasePointer(MF)) {
    if (MFI->SGPRForFPSaveRestoreCopy)
      LiveRegs.addReg(MFI->SGPRForFPSaveRestoreCopy);

    assert(!MFI->SGPRForBPSaveRestoreCopy &&
           !MFI->BasePointerSaveIndex && "Re-reserving spill slot for BP");
    getVGPRSpillLaneOrTempRegister(MF, LiveRegs, MFI->SGPRForBPSaveRestoreCopy,
                                   MFI->BasePointerSaveIndex, false);
  }
}

void SIFrameLowering::determineCalleeSavesSGPR(MachineFunction &MF,
                                               BitVector &SavedRegs,
                                               RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  if (MFI->isEntryFunction())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  // The SP is specifically managed and we don't want extra spills of it.
  SavedRegs.reset(MFI->getStackPtrOffsetReg());

  const BitVector AllSavedRegs = SavedRegs;
  SavedRegs.clearBitsInMask(TRI->getAllVectorRegMask());

  // If clearing VGPRs changed the mask, we will have some CSR VGPR spills.
  const bool HaveAnyCSRVGPR = SavedRegs != AllSavedRegs;

  // We have to anticipate introducing CSR VGPR spills if we don't have any
  // stack objects already, since we require an FP if there is a call and stack.
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  const bool WillHaveFP = FrameInfo.hasCalls() && HaveAnyCSRVGPR;

  // FP will be specially managed like SP.
  if (WillHaveFP || hasFP(MF))
    SavedRegs.reset(MFI->getFrameOffsetReg());
}

bool SIFrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return true; // Early exit if no callee saved registers are modified!

  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (!FuncInfo->SGPRForFPSaveRestoreCopy &&
      !FuncInfo->SGPRForBPSaveRestoreCopy)
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *RI = ST.getRegisterInfo();
  Register FramePtrReg = FuncInfo->getFrameOffsetReg();
  Register BasePtrReg = RI->getBaseRegister();
  unsigned NumModifiedRegs = 0;

  if (FuncInfo->SGPRForFPSaveRestoreCopy)
    NumModifiedRegs++;
  if (FuncInfo->SGPRForBPSaveRestoreCopy)
    NumModifiedRegs++;

  for (auto &CS : CSI) {
    if (CS.getReg() == FramePtrReg && FuncInfo->SGPRForFPSaveRestoreCopy) {
      CS.setDstReg(FuncInfo->SGPRForFPSaveRestoreCopy);
      if (--NumModifiedRegs)
        break;
    } else if (CS.getReg() == BasePtrReg &&
               FuncInfo->SGPRForBPSaveRestoreCopy) {
      CS.setDstReg(FuncInfo->SGPRForBPSaveRestoreCopy);
      if (--NumModifiedRegs)
        break;
    }
  }

  return false;
}

MachineBasicBlock::iterator SIFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF,
    MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  int64_t Amount = I->getOperand(0).getImm();
  if (Amount == 0)
    return MBB.erase(I);

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  if (!hasReservedCallFrame(MF)) {
    Amount = alignTo(Amount, getStackAlign());
    assert(isUInt<32>(Amount) && "exceeded stack address space size");
    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Register SPReg = MFI->getStackPtrOffsetReg();

    unsigned Op = IsDestroy ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
    BuildMI(MBB, I, DL, TII->get(Op), SPReg)
        .addReg(SPReg)
        .addImm(Amount * getScratchScaleFactor(ST));
  } else if (CalleePopAmount != 0) {
    llvm_unreachable("is this used?");
  }

  return MBB.erase(I);
}

/// Returns true if the frame will require a reference to the stack pointer.
///
/// This is the set of conditions common to setting up the stack pointer in a
/// kernel, and for using a frame pointer in a callable function.
///
/// FIXME: Should also check hasOpaqueSPAdjustment and if any inline asm
/// references SP.
static bool frameTriviallyRequiresSP(const MachineFrameInfo &MFI) {
  return MFI.hasVarSizedObjects() || MFI.hasStackMap() || MFI.hasPatchPoint();
}

// The FP for kernels is always known 0, so we never really need to setup an
// explicit register for it. However, DisableFramePointerElim will force us to
// use a register for it.
bool SIFrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // For entry functions we can use an immediate offset in most cases, so the
  // presence of calls doesn't imply we need a distinct frame pointer.
  if (MFI.hasCalls() &&
      !MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
    // All offsets are unsigned, so need to be addressed in the same direction
    // as stack growth.

    // FIXME: This function is pretty broken, since it can be called before the
    // frame layout is determined or CSR spills are inserted.
    return MFI.getStackSize() != 0;
  }

  return frameTriviallyRequiresSP(MFI) || MFI.isFrameAddressTaken() ||
         MF.getSubtarget<GCNSubtarget>().getRegisterInfo()->needsStackRealignment(MF) ||
         MF.getTarget().Options.DisableFramePointerElim(MF);
}

// This is essentially a reduced version of hasFP for entry functions. Since the
// stack pointer is known 0 on entry to kernels, we never really need an FP
// register. We may need to initialize the stack pointer depending on the frame
// properties, which logically overlaps many of the cases where an ordinary
// function would require an FP.
bool SIFrameLowering::requiresStackPointerReference(
    const MachineFunction &MF) const {
  // Callable functions always require a stack pointer reference.
  assert(MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction() &&
         "only expected to call this for entry points");

  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Entry points ordinarily don't need to initialize SP. We have to set it up
  // for callees if there are any. Also note tail calls are impossible/don't
  // make any sense for kernels.
  if (MFI.hasCalls())
    return true;

  // We still need to initialize the SP if we're doing anything weird that
  // references the SP, like variable sized stack objects.
  return frameTriviallyRequiresSP(MFI);
}