1 //===----------------------- SIFrameLowering.cpp --------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //==-----------------------------------------------------------------------===//
8
9 #include "SIFrameLowering.h"
10 #include "AMDGPUSubtarget.h"
11 #include "SIInstrInfo.h"
12 #include "SIMachineFunctionInfo.h"
13 #include "SIRegisterInfo.h"
14 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
15
16 #include "llvm/CodeGen/LivePhysRegs.h"
17 #include "llvm/CodeGen/MachineFrameInfo.h"
18 #include "llvm/CodeGen/MachineFunction.h"
19 #include "llvm/CodeGen/MachineInstrBuilder.h"
20 #include "llvm/CodeGen/RegisterScavenging.h"
21
22 using namespace llvm;
23
24 #define DEBUG_TYPE "frame-info"
25
26
27 // Find a scratch register that we can use at the start of the prologue to
28 // re-align the stack pointer. We avoid using callee-save registers since they
29 // may appear to be free when this is called from canUseAsPrologue (during
30 // shrink wrapping), but then no longer be free when this is called from
31 // emitPrologue.
32 //
33 // FIXME: This is a bit conservative, since in the above case we could use one
34 // of the callee-save registers as a scratch temp to re-align the stack pointer,
35 // but we would then have to make sure that we were in fact saving at least one
36 // callee-save register in the prologue, which is additional complexity that
37 // doesn't seem worth the benefit.
38 static MCRegister findScratchNonCalleeSaveRegister(MachineRegisterInfo &MRI,
39                                                    LivePhysRegs &LiveRegs,
40                                                    const TargetRegisterClass &RC,
41                                                    bool Unused = false) {
42   // Mark callee saved registers as used so we will not choose them.
43   const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();
44   for (unsigned i = 0; CSRegs[i]; ++i)
45     LiveRegs.addReg(CSRegs[i]);
46
47   if (Unused) {
48     // We are looking for a register that can be used throughout the entire
49     // function, so any use is unacceptable.
50     for (MCRegister Reg : RC) {
51       if (!MRI.isPhysRegUsed(Reg) && LiveRegs.available(MRI, Reg))
52         return Reg;
53     }
54   } else {
55     for (MCRegister Reg : RC) {
56       if (LiveRegs.available(MRI, Reg))
57         return Reg;
58     }
59   }
60
61   // If we require an unused register, the caller is in a context where
62   // failure is an option and has a fallback plan. In all other contexts,
63   // this must succeed.
64   if (!Unused)
65     report_fatal_error("failed to find free scratch register");
66
67   return MCRegister();
68 }
69
70 static void getVGPRSpillLaneOrTempRegister(MachineFunction &MF,
71                                            LivePhysRegs &LiveRegs,
72                                            Register &TempSGPR,
73                                            Optional<int> &FrameIndex,
74                                            bool IsFP) {
75   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
76   MachineFrameInfo &FrameInfo = MF.getFrameInfo();
77
78 #ifndef NDEBUG
79   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
80   const SIRegisterInfo *TRI = ST.getRegisterInfo();
81 #endif
82
83   // We need to save and restore the current FP/BP.
84
85   // 1: If there is already a VGPR with free lanes, use it. We
86   // may already have to pay the penalty for spilling a CSR VGPR.
87 if (MFI->haveFreeLanesForSGPRSpill(MF, 1)) { 88 int NewFI = FrameInfo.CreateStackObject(4, Align(4), true, nullptr, 89 TargetStackID::SGPRSpill); 90 91 if (!MFI->allocateSGPRSpillToVGPR(MF, NewFI)) 92 llvm_unreachable("allocate SGPR spill should have worked"); 93 94 FrameIndex = NewFI; 95 96 LLVM_DEBUG(auto Spill = MFI->getSGPRToVGPRSpills(NewFI).front(); 97 dbgs() << "Spilling " << (IsFP ? "FP" : "BP") << " to " 98 << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane 99 << '\n'); 100 return; 101 } 102 103 // 2: Next, try to save the FP/BP in an unused SGPR. 104 TempSGPR = findScratchNonCalleeSaveRegister( 105 MF.getRegInfo(), LiveRegs, AMDGPU::SReg_32_XM0_XEXECRegClass, true); 106 107 if (!TempSGPR) { 108 int NewFI = FrameInfo.CreateStackObject(4, Align(4), true, nullptr, 109 TargetStackID::SGPRSpill); 110 111 if (MFI->allocateSGPRSpillToVGPR(MF, NewFI)) { 112 // 3: There's no free lane to spill, and no free register to save FP/BP, 113 // so we're forced to spill another VGPR to use for the spill. 114 FrameIndex = NewFI; 115 116 LLVM_DEBUG( 117 auto Spill = MFI->getSGPRToVGPRSpills(NewFI).front(); 118 dbgs() << (IsFP ? "FP" : "BP") << " requires fallback spill to " 119 << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane << '\n';); 120 } else { 121 // Remove dead <NewFI> index 122 MF.getFrameInfo().RemoveStackObject(NewFI); 123 // 4: If all else fails, spill the FP/BP to memory. 124 FrameIndex = FrameInfo.CreateSpillStackObject(4, Align(4)); 125 LLVM_DEBUG(dbgs() << "Reserved FI " << FrameIndex << " for spilling " 126 << (IsFP ? "FP" : "BP") << '\n'); 127 } 128 } else { 129 LLVM_DEBUG(dbgs() << "Saving " << (IsFP ? "FP" : "BP") << " with copy to " 130 << printReg(TempSGPR, TRI) << '\n'); 131 } 132 } 133 134 // We need to specially emit stack operations here because a different frame 135 // register is used than in the rest of the function, as getFrameRegister would 136 // use. 137 static void buildPrologSpill(const GCNSubtarget &ST, LivePhysRegs &LiveRegs, 138 MachineBasicBlock &MBB, 139 MachineBasicBlock::iterator I, 140 const SIInstrInfo *TII, Register SpillReg, 141 Register ScratchRsrcReg, Register SPReg, int FI) { 142 MachineFunction *MF = MBB.getParent(); 143 MachineFrameInfo &MFI = MF->getFrameInfo(); 144 145 int64_t Offset = MFI.getObjectOffset(FI); 146 147 MachineMemOperand *MMO = MF->getMachineMemOperand( 148 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore, 4, 149 MFI.getObjectAlign(FI)); 150 151 if (ST.enableFlatScratch()) { 152 if (TII->isLegalFLATOffset(Offset, AMDGPUAS::PRIVATE_ADDRESS, true)) { 153 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::SCRATCH_STORE_DWORD_SADDR)) 154 .addReg(SpillReg, RegState::Kill) 155 .addReg(SPReg) 156 .addImm(Offset) 157 .addImm(0) // glc 158 .addImm(0) // slc 159 .addImm(0) // dlc 160 .addMemOperand(MMO); 161 return; 162 } 163 } else if (SIInstrInfo::isLegalMUBUFImmOffset(Offset)) { 164 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::BUFFER_STORE_DWORD_OFFSET)) 165 .addReg(SpillReg, RegState::Kill) 166 .addReg(ScratchRsrcReg) 167 .addReg(SPReg) 168 .addImm(Offset) 169 .addImm(0) // glc 170 .addImm(0) // slc 171 .addImm(0) // tfe 172 .addImm(0) // dlc 173 .addImm(0) // swz 174 .addMemOperand(MMO); 175 return; 176 } 177 178 // Don't clobber the TmpVGPR if we also need a scratch reg for the stack 179 // offset in the spill. 
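  // For illustration only (register names below are placeholders, not the
  // exact operands the builders produce), the fallback emitted here is roughly:
  //   s_add_u32  s_off, s32, Offset            ; flat-scratch path
  //   scratch_store_dword off, v_spill, s_off
  // or, on the MUBUF path:
  //   v_mov_b32  v_off, Offset
  //   buffer_store_dword v_spill, v_off, s_rsrc, s32 offen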
180 LiveRegs.addReg(SpillReg); 181 182 if (ST.enableFlatScratch()) { 183 MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister( 184 MF->getRegInfo(), LiveRegs, AMDGPU::SReg_32_XM0RegClass); 185 186 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_ADD_U32), OffsetReg) 187 .addReg(SPReg) 188 .addImm(Offset); 189 190 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::SCRATCH_STORE_DWORD_SADDR)) 191 .addReg(SpillReg, RegState::Kill) 192 .addReg(OffsetReg, RegState::Kill) 193 .addImm(0) 194 .addImm(0) // glc 195 .addImm(0) // slc 196 .addImm(0) // dlc 197 .addMemOperand(MMO); 198 } else { 199 MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister( 200 MF->getRegInfo(), LiveRegs, AMDGPU::VGPR_32RegClass); 201 202 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::V_MOV_B32_e32), OffsetReg) 203 .addImm(Offset); 204 205 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::BUFFER_STORE_DWORD_OFFEN)) 206 .addReg(SpillReg, RegState::Kill) 207 .addReg(OffsetReg, RegState::Kill) 208 .addReg(ScratchRsrcReg) 209 .addReg(SPReg) 210 .addImm(0) 211 .addImm(0) // glc 212 .addImm(0) // slc 213 .addImm(0) // tfe 214 .addImm(0) // dlc 215 .addImm(0) // swz 216 .addMemOperand(MMO); 217 } 218 219 LiveRegs.removeReg(SpillReg); 220 } 221 222 static void buildEpilogReload(const GCNSubtarget &ST, LivePhysRegs &LiveRegs, 223 MachineBasicBlock &MBB, 224 MachineBasicBlock::iterator I, 225 const SIInstrInfo *TII, Register SpillReg, 226 Register ScratchRsrcReg, Register SPReg, int FI) { 227 MachineFunction *MF = MBB.getParent(); 228 MachineFrameInfo &MFI = MF->getFrameInfo(); 229 int64_t Offset = MFI.getObjectOffset(FI); 230 231 MachineMemOperand *MMO = MF->getMachineMemOperand( 232 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad, 4, 233 MFI.getObjectAlign(FI)); 234 235 if (ST.enableFlatScratch()) { 236 if (TII->isLegalFLATOffset(Offset, AMDGPUAS::PRIVATE_ADDRESS, true)) { 237 BuildMI(MBB, I, DebugLoc(), 238 TII->get(AMDGPU::SCRATCH_LOAD_DWORD_SADDR), SpillReg) 239 .addReg(SPReg) 240 .addImm(Offset) 241 .addImm(0) // glc 242 .addImm(0) // slc 243 .addImm(0) // dlc 244 .addMemOperand(MMO); 245 return; 246 } 247 MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister( 248 MF->getRegInfo(), LiveRegs, AMDGPU::SReg_32_XM0RegClass); 249 250 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_ADD_U32), OffsetReg) 251 .addReg(SPReg) 252 .addImm(Offset); 253 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::SCRATCH_LOAD_DWORD_SADDR), 254 SpillReg) 255 .addReg(OffsetReg, RegState::Kill) 256 .addImm(0) 257 .addImm(0) // glc 258 .addImm(0) // slc 259 .addImm(0) // dlc 260 .addMemOperand(MMO); 261 return; 262 } 263 264 if (SIInstrInfo::isLegalMUBUFImmOffset(Offset)) { 265 BuildMI(MBB, I, DebugLoc(), 266 TII->get(AMDGPU::BUFFER_LOAD_DWORD_OFFSET), SpillReg) 267 .addReg(ScratchRsrcReg) 268 .addReg(SPReg) 269 .addImm(Offset) 270 .addImm(0) // glc 271 .addImm(0) // slc 272 .addImm(0) // tfe 273 .addImm(0) // dlc 274 .addImm(0) // swz 275 .addMemOperand(MMO); 276 return; 277 } 278 279 MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister( 280 MF->getRegInfo(), LiveRegs, AMDGPU::VGPR_32RegClass); 281 282 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::V_MOV_B32_e32), OffsetReg) 283 .addImm(Offset); 284 285 BuildMI(MBB, I, DebugLoc(), 286 TII->get(AMDGPU::BUFFER_LOAD_DWORD_OFFEN), SpillReg) 287 .addReg(OffsetReg, RegState::Kill) 288 .addReg(ScratchRsrcReg) 289 .addReg(SPReg) 290 .addImm(0) 291 .addImm(0) // glc 292 .addImm(0) // slc 293 .addImm(0) // tfe 294 .addImm(0) // dlc 295 .addImm(0) // swz 296 .addMemOperand(MMO); 297 } 298 299 static void 
buildGitPtr(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, 300 const DebugLoc &DL, const SIInstrInfo *TII, 301 Register TargetReg) { 302 MachineFunction *MF = MBB.getParent(); 303 const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 304 const SIRegisterInfo *TRI = &TII->getRegisterInfo(); 305 const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32); 306 Register TargetLo = TRI->getSubReg(TargetReg, AMDGPU::sub0); 307 Register TargetHi = TRI->getSubReg(TargetReg, AMDGPU::sub1); 308 309 if (MFI->getGITPtrHigh() != 0xffffffff) { 310 BuildMI(MBB, I, DL, SMovB32, TargetHi) 311 .addImm(MFI->getGITPtrHigh()) 312 .addReg(TargetReg, RegState::ImplicitDefine); 313 } else { 314 const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64); 315 BuildMI(MBB, I, DL, GetPC64, TargetReg); 316 } 317 Register GitPtrLo = MFI->getGITPtrLoReg(*MF); 318 MF->getRegInfo().addLiveIn(GitPtrLo); 319 MBB.addLiveIn(GitPtrLo); 320 BuildMI(MBB, I, DL, SMovB32, TargetLo) 321 .addReg(GitPtrLo); 322 } 323 324 // Emit flat scratch setup code, assuming `MFI->hasFlatScratchInit()` 325 void SIFrameLowering::emitEntryFunctionFlatScratchInit( 326 MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I, 327 const DebugLoc &DL, Register ScratchWaveOffsetReg) const { 328 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 329 const SIInstrInfo *TII = ST.getInstrInfo(); 330 const SIRegisterInfo *TRI = &TII->getRegisterInfo(); 331 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 332 333 // We don't need this if we only have spills since there is no user facing 334 // scratch. 335 336 // TODO: If we know we don't have flat instructions earlier, we can omit 337 // this from the input registers. 338 // 339 // TODO: We only need to know if we access scratch space through a flat 340 // pointer. Because we only detect if flat instructions are used at all, 341 // this will be used more often than necessary on VI. 342 343 Register FlatScrInitLo; 344 Register FlatScrInitHi; 345 346 if (ST.isAmdPalOS()) { 347 // Extract the scratch offset from the descriptor in the GIT 348 LivePhysRegs LiveRegs; 349 LiveRegs.init(*TRI); 350 LiveRegs.addLiveIns(MBB); 351 352 // Find unused reg to load flat scratch init into 353 MachineRegisterInfo &MRI = MF.getRegInfo(); 354 Register FlatScrInit = AMDGPU::NoRegister; 355 ArrayRef<MCPhysReg> AllSGPR64s = TRI->getAllSGPR64(MF); 356 unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 1) / 2; 357 AllSGPR64s = AllSGPR64s.slice( 358 std::min(static_cast<unsigned>(AllSGPR64s.size()), NumPreloaded)); 359 Register GITPtrLoReg = MFI->getGITPtrLoReg(MF); 360 for (MCPhysReg Reg : AllSGPR64s) { 361 if (LiveRegs.available(MRI, Reg) && MRI.isAllocatable(Reg) && 362 !TRI->isSubRegisterEq(Reg, GITPtrLoReg)) { 363 FlatScrInit = Reg; 364 break; 365 } 366 } 367 assert(FlatScrInit && "Failed to find free register for scratch init"); 368 369 FlatScrInitLo = TRI->getSubReg(FlatScrInit, AMDGPU::sub0); 370 FlatScrInitHi = TRI->getSubReg(FlatScrInit, AMDGPU::sub1); 371 372 buildGitPtr(MBB, I, DL, TII, FlatScrInit); 373 374 // We now have the GIT ptr - now get the scratch descriptor from the entry 375 // at offset 0 (or offset 16 for a compute shader). 
376 MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); 377 const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM); 378 auto *MMO = MF.getMachineMemOperand( 379 PtrInfo, 380 MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | 381 MachineMemOperand::MODereferenceable, 382 8, Align(4)); 383 unsigned Offset = 384 MF.getFunction().getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0; 385 const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>(); 386 unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset); 387 BuildMI(MBB, I, DL, LoadDwordX2, FlatScrInit) 388 .addReg(FlatScrInit) 389 .addImm(EncodedOffset) // offset 390 .addImm(0) // glc 391 .addImm(0) // dlc 392 .addMemOperand(MMO); 393 394 // Mask the offset in [47:0] of the descriptor 395 const MCInstrDesc &SAndB32 = TII->get(AMDGPU::S_AND_B32); 396 BuildMI(MBB, I, DL, SAndB32, FlatScrInitHi) 397 .addReg(FlatScrInitHi) 398 .addImm(0xffff); 399 } else { 400 Register FlatScratchInitReg = 401 MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT); 402 assert(FlatScratchInitReg); 403 404 MachineRegisterInfo &MRI = MF.getRegInfo(); 405 MRI.addLiveIn(FlatScratchInitReg); 406 MBB.addLiveIn(FlatScratchInitReg); 407 408 FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0); 409 FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1); 410 } 411 412 // Do a 64-bit pointer add. 413 if (ST.flatScratchIsPointer()) { 414 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 415 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo) 416 .addReg(FlatScrInitLo) 417 .addReg(ScratchWaveOffsetReg); 418 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), FlatScrInitHi) 419 .addReg(FlatScrInitHi) 420 .addImm(0); 421 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32)). 422 addReg(FlatScrInitLo). 423 addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_LO | 424 (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_))); 425 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32)). 426 addReg(FlatScrInitHi). 427 addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_HI | 428 (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_))); 429 return; 430 } 431 432 // For GFX9. 433 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO) 434 .addReg(FlatScrInitLo) 435 .addReg(ScratchWaveOffsetReg); 436 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI) 437 .addReg(FlatScrInitHi) 438 .addImm(0); 439 440 return; 441 } 442 443 assert(ST.getGeneration() < AMDGPUSubtarget::GFX9); 444 445 // Copy the size in bytes. 446 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO) 447 .addReg(FlatScrInitHi, RegState::Kill); 448 449 // Add wave offset in bytes to private base offset. 450 // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init. 451 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo) 452 .addReg(FlatScrInitLo) 453 .addReg(ScratchWaveOffsetReg); 454 455 // Convert offset to 256-byte units. 456 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI) 457 .addReg(FlatScrInitLo, RegState::Kill) 458 .addImm(8); 459 } 460 461 // Note SGPRSpill stack IDs should only be used for SGPR spilling to VGPRs, not 462 // memory. They should have been removed by now. 463 static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) { 464 for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd(); 465 I != E; ++I) { 466 if (!MFI.isDeadObjectIndex(I)) 467 return false; 468 } 469 470 return true; 471 } 472 473 // Shift down registers reserved for the scratch RSRC. 
474 Register SIFrameLowering::getEntryFunctionReservedScratchRsrcReg( 475 MachineFunction &MF) const { 476 477 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 478 const SIInstrInfo *TII = ST.getInstrInfo(); 479 const SIRegisterInfo *TRI = &TII->getRegisterInfo(); 480 MachineRegisterInfo &MRI = MF.getRegInfo(); 481 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 482 483 assert(MFI->isEntryFunction()); 484 485 Register ScratchRsrcReg = MFI->getScratchRSrcReg(); 486 487 if (!ScratchRsrcReg || (!MRI.isPhysRegUsed(ScratchRsrcReg) && 488 allStackObjectsAreDead(MF.getFrameInfo()))) 489 return Register(); 490 491 if (ST.hasSGPRInitBug() || 492 ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF)) 493 return ScratchRsrcReg; 494 495 // We reserved the last registers for this. Shift it down to the end of those 496 // which were actually used. 497 // 498 // FIXME: It might be safer to use a pseudoregister before replacement. 499 500 // FIXME: We should be able to eliminate unused input registers. We only 501 // cannot do this for the resources required for scratch access. For now we 502 // skip over user SGPRs and may leave unused holes. 503 504 unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4; 505 ArrayRef<MCPhysReg> AllSGPR128s = TRI->getAllSGPR128(MF); 506 AllSGPR128s = AllSGPR128s.slice(std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded)); 507 508 // Skip the last N reserved elements because they should have already been 509 // reserved for VCC etc. 510 Register GITPtrLoReg = MFI->getGITPtrLoReg(MF); 511 for (MCPhysReg Reg : AllSGPR128s) { 512 // Pick the first unallocated one. Make sure we don't clobber the other 513 // reserved input we needed. Also for PAL, make sure we don't clobber 514 // the GIT pointer passed in SGPR0 or SGPR8. 515 if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) && 516 !TRI->isSubRegisterEq(Reg, GITPtrLoReg)) { 517 MRI.replaceRegWith(ScratchRsrcReg, Reg); 518 MFI->setScratchRSrcReg(Reg); 519 return Reg; 520 } 521 } 522 523 return ScratchRsrcReg; 524 } 525 526 static unsigned getScratchScaleFactor(const GCNSubtarget &ST) { 527 return ST.enableFlatScratch() ? 1 : ST.getWavefrontSize(); 528 } 529 530 void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF, 531 MachineBasicBlock &MBB) const { 532 assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported"); 533 534 // FIXME: If we only have SGPR spills, we won't actually be using scratch 535 // memory since these spill to VGPRs. We should be cleaning up these unused 536 // SGPR spill frame indices somewhere. 537 538 // FIXME: We still have implicit uses on SGPR spill instructions in case they 539 // need to spill to vector memory. It's likely that will not happen, but at 540 // this point it appears we need the setup. This part of the prolog should be 541 // emitted after frame indices are eliminated. 542 543 // FIXME: Remove all of the isPhysRegUsed checks 544 545 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 546 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 547 const SIInstrInfo *TII = ST.getInstrInfo(); 548 const SIRegisterInfo *TRI = &TII->getRegisterInfo(); 549 MachineRegisterInfo &MRI = MF.getRegInfo(); 550 const Function &F = MF.getFunction(); 551 552 assert(MFI->isEntryFunction()); 553 554 Register PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg( 555 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); 556 // FIXME: Hack to not crash in situations which emitted an error. 
557   if (!PreloadedScratchWaveOffsetReg)
558     return;
559
560   // We need to do the replacement of the private segment buffer register even
561   // if there are no stack objects. There could be stores to undef or a
562   // constant without an associated object.
563   //
564   // This will return `Register()` in cases where there are no actual
565   // uses of the SRSRC.
566   Register ScratchRsrcReg;
567   if (!ST.enableFlatScratch())
568     ScratchRsrcReg = getEntryFunctionReservedScratchRsrcReg(MF);
569
570   // Make the selected register live throughout the function.
571   if (ScratchRsrcReg) {
572     for (MachineBasicBlock &OtherBB : MF) {
573       if (&OtherBB != &MBB) {
574         OtherBB.addLiveIn(ScratchRsrcReg);
575       }
576     }
577   }
578
579   // Now that we have fixed the reserved SRSRC we need to locate the
580   // (potentially) preloaded SRSRC.
581   Register PreloadedScratchRsrcReg;
582   if (ST.isAmdHsaOrMesa(F)) {
583     PreloadedScratchRsrcReg =
584         MFI->getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
585     if (ScratchRsrcReg && PreloadedScratchRsrcReg) {
586       // We added live-ins during argument lowering, but since they were not
587       // used they were deleted. We're adding the uses now, so add them back.
588       MRI.addLiveIn(PreloadedScratchRsrcReg);
589       MBB.addLiveIn(PreloadedScratchRsrcReg);
590     }
591   }
592
593   // Debug location must be unknown since the first debug location is used to
594   // determine the end of the prologue.
595   DebugLoc DL;
596   MachineBasicBlock::iterator I = MBB.begin();
597
598   // We found the SRSRC first because it needs four registers and has an
599   // alignment requirement. If the SRSRC that we found conflicts with the
600   // scratch wave offset, which may be in a fixed SGPR or a free SGPR
601   // chosen by SITargetLowering::allocateSystemSGPRs, COPY the scratch
602   // wave offset to a free SGPR.
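  // For example (SGPR numbers are illustrative only): if the wave offset was
  // preloaded into s6 but s[4:7] was picked for the SRSRC, a plain
  //   s_mov_b32 s_free, s6
  // is emitted below before s[4:7] gets overwritten.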
603 Register ScratchWaveOffsetReg; 604 if (TRI->isSubRegisterEq(ScratchRsrcReg, PreloadedScratchWaveOffsetReg)) { 605 ArrayRef<MCPhysReg> AllSGPRs = TRI->getAllSGPR32(MF); 606 unsigned NumPreloaded = MFI->getNumPreloadedSGPRs(); 607 AllSGPRs = AllSGPRs.slice( 608 std::min(static_cast<unsigned>(AllSGPRs.size()), NumPreloaded)); 609 Register GITPtrLoReg = MFI->getGITPtrLoReg(MF); 610 for (MCPhysReg Reg : AllSGPRs) { 611 if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) && 612 !TRI->isSubRegisterEq(ScratchRsrcReg, Reg) && GITPtrLoReg != Reg) { 613 ScratchWaveOffsetReg = Reg; 614 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg) 615 .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill); 616 break; 617 } 618 } 619 } else { 620 ScratchWaveOffsetReg = PreloadedScratchWaveOffsetReg; 621 } 622 assert(ScratchWaveOffsetReg); 623 624 if (requiresStackPointerReference(MF)) { 625 Register SPReg = MFI->getStackPtrOffsetReg(); 626 assert(SPReg != AMDGPU::SP_REG); 627 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), SPReg) 628 .addImm(MF.getFrameInfo().getStackSize() * getScratchScaleFactor(ST)); 629 } 630 631 if (hasFP(MF)) { 632 Register FPReg = MFI->getFrameOffsetReg(); 633 assert(FPReg != AMDGPU::FP_REG); 634 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), FPReg).addImm(0); 635 } 636 637 if (MFI->hasFlatScratchInit() || ScratchRsrcReg) { 638 MRI.addLiveIn(PreloadedScratchWaveOffsetReg); 639 MBB.addLiveIn(PreloadedScratchWaveOffsetReg); 640 } 641 642 if (MFI->hasFlatScratchInit()) { 643 emitEntryFunctionFlatScratchInit(MF, MBB, I, DL, ScratchWaveOffsetReg); 644 } 645 646 if (ScratchRsrcReg) { 647 emitEntryFunctionScratchRsrcRegSetup(MF, MBB, I, DL, 648 PreloadedScratchRsrcReg, 649 ScratchRsrcReg, ScratchWaveOffsetReg); 650 } 651 } 652 653 // Emit scratch RSRC setup code, assuming `ScratchRsrcReg != AMDGPU::NoReg` 654 void SIFrameLowering::emitEntryFunctionScratchRsrcRegSetup( 655 MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I, 656 const DebugLoc &DL, Register PreloadedScratchRsrcReg, 657 Register ScratchRsrcReg, Register ScratchWaveOffsetReg) const { 658 659 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 660 const SIInstrInfo *TII = ST.getInstrInfo(); 661 const SIRegisterInfo *TRI = &TII->getRegisterInfo(); 662 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 663 const Function &Fn = MF.getFunction(); 664 665 if (ST.isAmdPalOS()) { 666 // The pointer to the GIT is formed from the offset passed in and either 667 // the amdgpu-git-ptr-high function attribute or the top part of the PC 668 Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1); 669 670 buildGitPtr(MBB, I, DL, TII, Rsrc01); 671 672 // We now have the GIT ptr - now get the scratch descriptor from the entry 673 // at offset 0 (or offset 16 for a compute shader). 674 MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); 675 const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM); 676 auto MMO = MF.getMachineMemOperand(PtrInfo, 677 MachineMemOperand::MOLoad | 678 MachineMemOperand::MOInvariant | 679 MachineMemOperand::MODereferenceable, 680 16, Align(4)); 681 unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ? 
16 : 0; 682 const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>(); 683 unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset); 684 BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg) 685 .addReg(Rsrc01) 686 .addImm(EncodedOffset) // offset 687 .addImm(0) // glc 688 .addImm(0) // dlc 689 .addReg(ScratchRsrcReg, RegState::ImplicitDefine) 690 .addMemOperand(MMO); 691 } else if (ST.isMesaGfxShader(Fn) || !PreloadedScratchRsrcReg) { 692 assert(!ST.isAmdHsaOrMesa(Fn)); 693 const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32); 694 695 Register Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2); 696 Register Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3); 697 698 // Use relocations to get the pointer, and setup the other bits manually. 699 uint64_t Rsrc23 = TII->getScratchRsrcWords23(); 700 701 if (MFI->hasImplicitBufferPtr()) { 702 Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1); 703 704 if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) { 705 const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64); 706 707 BuildMI(MBB, I, DL, Mov64, Rsrc01) 708 .addReg(MFI->getImplicitBufferPtrUserSGPR()) 709 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 710 } else { 711 const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM); 712 713 MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); 714 auto MMO = MF.getMachineMemOperand( 715 PtrInfo, 716 MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | 717 MachineMemOperand::MODereferenceable, 718 8, Align(4)); 719 BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01) 720 .addReg(MFI->getImplicitBufferPtrUserSGPR()) 721 .addImm(0) // offset 722 .addImm(0) // glc 723 .addImm(0) // dlc 724 .addMemOperand(MMO) 725 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 726 727 MF.getRegInfo().addLiveIn(MFI->getImplicitBufferPtrUserSGPR()); 728 MBB.addLiveIn(MFI->getImplicitBufferPtrUserSGPR()); 729 } 730 } else { 731 Register Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0); 732 Register Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1); 733 734 BuildMI(MBB, I, DL, SMovB32, Rsrc0) 735 .addExternalSymbol("SCRATCH_RSRC_DWORD0") 736 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 737 738 BuildMI(MBB, I, DL, SMovB32, Rsrc1) 739 .addExternalSymbol("SCRATCH_RSRC_DWORD1") 740 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 741 742 } 743 744 BuildMI(MBB, I, DL, SMovB32, Rsrc2) 745 .addImm(Rsrc23 & 0xffffffff) 746 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 747 748 BuildMI(MBB, I, DL, SMovB32, Rsrc3) 749 .addImm(Rsrc23 >> 32) 750 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 751 } else if (ST.isAmdHsaOrMesa(Fn)) { 752 assert(PreloadedScratchRsrcReg); 753 754 if (ScratchRsrcReg != PreloadedScratchRsrcReg) { 755 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg) 756 .addReg(PreloadedScratchRsrcReg, RegState::Kill); 757 } 758 } 759 760 // Add the scratch wave offset into the scratch RSRC. 761 // 762 // We only want to update the first 48 bits, which is the base address 763 // pointer, without touching the adjacent 16 bits of flags. We know this add 764 // cannot carry-out from bit 47, otherwise the scratch allocation would be 765 // impossible to fit in the 48-bit global address space. 766 // 767 // TODO: Evaluate if it is better to just construct an SRD using the flat 768 // scratch init and some constants rather than update the one we are passed. 
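  // Illustratively (placeholder names), the update below is just:
  //   s_add_u32  s_rsrc0, s_rsrc0, s_wave_off
  //   s_addc_u32 s_rsrc1, s_rsrc1, 0
  // The carry stops at or below bit 47 of the base, so the flag bits in the
  // upper half of the second dword are left untouched.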
769 Register ScratchRsrcSub0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0); 770 Register ScratchRsrcSub1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1); 771 772 // We cannot Kill ScratchWaveOffsetReg here because we allow it to be used in 773 // the kernel body via inreg arguments. 774 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), ScratchRsrcSub0) 775 .addReg(ScratchRsrcSub0) 776 .addReg(ScratchWaveOffsetReg) 777 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 778 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), ScratchRsrcSub1) 779 .addReg(ScratchRsrcSub1) 780 .addImm(0) 781 .addReg(ScratchRsrcReg, RegState::ImplicitDefine); 782 } 783 784 bool SIFrameLowering::isSupportedStackID(TargetStackID::Value ID) const { 785 switch (ID) { 786 case TargetStackID::Default: 787 case TargetStackID::NoAlloc: 788 case TargetStackID::SGPRSpill: 789 return true; 790 case TargetStackID::SVEVector: 791 return false; 792 } 793 llvm_unreachable("Invalid TargetStackID::Value"); 794 } 795 796 // Activate all lanes, returns saved exec. 797 static Register buildScratchExecCopy(LivePhysRegs &LiveRegs, 798 MachineFunction &MF, 799 MachineBasicBlock &MBB, 800 MachineBasicBlock::iterator MBBI, 801 bool IsProlog) { 802 Register ScratchExecCopy; 803 MachineRegisterInfo &MRI = MF.getRegInfo(); 804 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 805 const SIInstrInfo *TII = ST.getInstrInfo(); 806 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 807 SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 808 DebugLoc DL; 809 810 if (LiveRegs.empty()) { 811 if (IsProlog) { 812 LiveRegs.init(TRI); 813 LiveRegs.addLiveIns(MBB); 814 if (FuncInfo->SGPRForFPSaveRestoreCopy) 815 LiveRegs.removeReg(FuncInfo->SGPRForFPSaveRestoreCopy); 816 817 if (FuncInfo->SGPRForBPSaveRestoreCopy) 818 LiveRegs.removeReg(FuncInfo->SGPRForBPSaveRestoreCopy); 819 } else { 820 // In epilog. 821 LiveRegs.init(*ST.getRegisterInfo()); 822 LiveRegs.addLiveOuts(MBB); 823 LiveRegs.stepBackward(*MBBI); 824 } 825 } 826 827 ScratchExecCopy = findScratchNonCalleeSaveRegister( 828 MRI, LiveRegs, *TRI.getWaveMaskRegClass()); 829 830 if (!IsProlog) 831 LiveRegs.removeReg(ScratchExecCopy); 832 833 const unsigned OrSaveExec = 834 ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64; 835 BuildMI(MBB, MBBI, DL, TII->get(OrSaveExec), ScratchExecCopy).addImm(-1); 836 837 return ScratchExecCopy; 838 } 839 840 void SIFrameLowering::emitPrologue(MachineFunction &MF, 841 MachineBasicBlock &MBB) const { 842 SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 843 if (FuncInfo->isEntryFunction()) { 844 emitEntryFunctionPrologue(MF, MBB); 845 return; 846 } 847 848 const MachineFrameInfo &MFI = MF.getFrameInfo(); 849 MachineRegisterInfo &MRI = MF.getRegInfo(); 850 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 851 const SIInstrInfo *TII = ST.getInstrInfo(); 852 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 853 854 Register StackPtrReg = FuncInfo->getStackPtrOffsetReg(); 855 Register FramePtrReg = FuncInfo->getFrameOffsetReg(); 856 Register BasePtrReg = 857 TRI.hasBasePointer(MF) ? TRI.getBaseRegister() : Register(); 858 LivePhysRegs LiveRegs; 859 860 MachineBasicBlock::iterator MBBI = MBB.begin(); 861 DebugLoc DL; 862 863 bool HasFP = false; 864 bool HasBP = false; 865 uint32_t NumBytes = MFI.getStackSize(); 866 uint32_t RoundedSize = NumBytes; 867 // To avoid clobbering VGPRs in lanes that weren't active on function entry, 868 // turn on all lanes before doing the spill to memory. 
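  // Sketch of the pattern built below (wave64 shown; registers illustrative):
  //   s_or_saveexec_b64 s[N:N+1], -1     ; save exec, enable all lanes
  //   buffer_store_dword ...             ; one store per spilled CSR VGPR/FP/BP
  //   s_mov_b64 exec, s[N:N+1]           ; restore the entry exec mask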
869 Register ScratchExecCopy; 870 871 bool HasFPSaveIndex = FuncInfo->FramePointerSaveIndex.hasValue(); 872 bool SpillFPToMemory = false; 873 // A StackID of SGPRSpill implies that this is a spill from SGPR to VGPR. 874 // Otherwise we are spilling the FP to memory. 875 if (HasFPSaveIndex) { 876 SpillFPToMemory = MFI.getStackID(*FuncInfo->FramePointerSaveIndex) != 877 TargetStackID::SGPRSpill; 878 } 879 880 bool HasBPSaveIndex = FuncInfo->BasePointerSaveIndex.hasValue(); 881 bool SpillBPToMemory = false; 882 // A StackID of SGPRSpill implies that this is a spill from SGPR to VGPR. 883 // Otherwise we are spilling the BP to memory. 884 if (HasBPSaveIndex) { 885 SpillBPToMemory = MFI.getStackID(*FuncInfo->BasePointerSaveIndex) != 886 TargetStackID::SGPRSpill; 887 } 888 889 // Emit the copy if we need an FP, and are using a free SGPR to save it. 890 if (FuncInfo->SGPRForFPSaveRestoreCopy) { 891 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FuncInfo->SGPRForFPSaveRestoreCopy) 892 .addReg(FramePtrReg) 893 .setMIFlag(MachineInstr::FrameSetup); 894 } 895 896 // Emit the copy if we need a BP, and are using a free SGPR to save it. 897 if (FuncInfo->SGPRForBPSaveRestoreCopy) { 898 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), 899 FuncInfo->SGPRForBPSaveRestoreCopy) 900 .addReg(BasePtrReg) 901 .setMIFlag(MachineInstr::FrameSetup); 902 } 903 904 // If a copy has been emitted for FP and/or BP, Make the SGPRs 905 // used in the copy instructions live throughout the function. 906 SmallVector<MCPhysReg, 2> TempSGPRs; 907 if (FuncInfo->SGPRForFPSaveRestoreCopy) 908 TempSGPRs.push_back(FuncInfo->SGPRForFPSaveRestoreCopy); 909 910 if (FuncInfo->SGPRForBPSaveRestoreCopy) 911 TempSGPRs.push_back(FuncInfo->SGPRForBPSaveRestoreCopy); 912 913 if (!TempSGPRs.empty()) { 914 for (MachineBasicBlock &MBB : MF) { 915 for (MCPhysReg Reg : TempSGPRs) 916 MBB.addLiveIn(Reg); 917 918 MBB.sortUniqueLiveIns(); 919 } 920 } 921 922 for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg 923 : FuncInfo->getSGPRSpillVGPRs()) { 924 if (!Reg.FI.hasValue()) 925 continue; 926 927 if (!ScratchExecCopy) 928 ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, true); 929 930 buildPrologSpill(ST, LiveRegs, MBB, MBBI, TII, Reg.VGPR, 931 FuncInfo->getScratchRSrcReg(), 932 StackPtrReg, 933 Reg.FI.getValue()); 934 } 935 936 if (HasFPSaveIndex && SpillFPToMemory) { 937 assert(!MFI.isDeadObjectIndex(FuncInfo->FramePointerSaveIndex.getValue())); 938 939 if (!ScratchExecCopy) 940 ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, true); 941 942 MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister( 943 MRI, LiveRegs, AMDGPU::VGPR_32RegClass); 944 945 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR) 946 .addReg(FramePtrReg); 947 948 buildPrologSpill(ST, LiveRegs, MBB, MBBI, TII, TmpVGPR, 949 FuncInfo->getScratchRSrcReg(), StackPtrReg, 950 FuncInfo->FramePointerSaveIndex.getValue()); 951 } 952 953 if (HasBPSaveIndex && SpillBPToMemory) { 954 assert(!MFI.isDeadObjectIndex(*FuncInfo->BasePointerSaveIndex)); 955 956 if (!ScratchExecCopy) 957 ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, true); 958 959 MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister( 960 MRI, LiveRegs, AMDGPU::VGPR_32RegClass); 961 962 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR) 963 .addReg(BasePtrReg); 964 965 buildPrologSpill(ST, LiveRegs, MBB, MBBI, TII, TmpVGPR, 966 FuncInfo->getScratchRSrcReg(), StackPtrReg, 967 *FuncInfo->BasePointerSaveIndex); 968 } 969 970 if (ScratchExecCopy) { 971 // FIXME: 
Split block and make terminator. 972 unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 973 MCRegister Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 974 BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec) 975 .addReg(ScratchExecCopy, RegState::Kill); 976 LiveRegs.addReg(ScratchExecCopy); 977 } 978 979 // In this case, spill the FP to a reserved VGPR. 980 if (HasFPSaveIndex && !SpillFPToMemory) { 981 const int FI = FuncInfo->FramePointerSaveIndex.getValue(); 982 assert(!MFI.isDeadObjectIndex(FI)); 983 984 assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill); 985 ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill = 986 FuncInfo->getSGPRToVGPRSpills(FI); 987 assert(Spill.size() == 1); 988 989 // Save FP before setting it up. 990 // FIXME: This should respect spillSGPRToVGPR; 991 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill[0].VGPR) 992 .addReg(FramePtrReg) 993 .addImm(Spill[0].Lane) 994 .addReg(Spill[0].VGPR, RegState::Undef); 995 } 996 997 // In this case, spill the BP to a reserved VGPR. 998 if (HasBPSaveIndex && !SpillBPToMemory) { 999 const int BasePtrFI = *FuncInfo->BasePointerSaveIndex; 1000 assert(!MFI.isDeadObjectIndex(BasePtrFI)); 1001 1002 assert(MFI.getStackID(BasePtrFI) == TargetStackID::SGPRSpill); 1003 ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill = 1004 FuncInfo->getSGPRToVGPRSpills(BasePtrFI); 1005 assert(Spill.size() == 1); 1006 1007 // Save BP before setting it up. 1008 // FIXME: This should respect spillSGPRToVGPR; 1009 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill[0].VGPR) 1010 .addReg(BasePtrReg) 1011 .addImm(Spill[0].Lane) 1012 .addReg(Spill[0].VGPR, RegState::Undef); 1013 } 1014 1015 if (TRI.needsStackRealignment(MF)) { 1016 HasFP = true; 1017 const unsigned Alignment = MFI.getMaxAlign().value(); 1018 1019 RoundedSize += Alignment; 1020 if (LiveRegs.empty()) { 1021 LiveRegs.init(TRI); 1022 LiveRegs.addLiveIns(MBB); 1023 LiveRegs.addReg(FuncInfo->SGPRForFPSaveRestoreCopy); 1024 LiveRegs.addReg(FuncInfo->SGPRForBPSaveRestoreCopy); 1025 } 1026 1027 Register ScratchSPReg = findScratchNonCalleeSaveRegister( 1028 MRI, LiveRegs, AMDGPU::SReg_32_XM0RegClass); 1029 assert(ScratchSPReg && ScratchSPReg != FuncInfo->SGPRForFPSaveRestoreCopy && 1030 ScratchSPReg != FuncInfo->SGPRForBPSaveRestoreCopy); 1031 1032 // s_add_u32 tmp_reg, s32, NumBytes 1033 // s_and_b32 s32, tmp_reg, 0b111...0000 1034 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), ScratchSPReg) 1035 .addReg(StackPtrReg) 1036 .addImm((Alignment - 1) * getScratchScaleFactor(ST)) 1037 .setMIFlag(MachineInstr::FrameSetup); 1038 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_AND_B32), FramePtrReg) 1039 .addReg(ScratchSPReg, RegState::Kill) 1040 .addImm(-Alignment * getScratchScaleFactor(ST)) 1041 .setMIFlag(MachineInstr::FrameSetup); 1042 FuncInfo->setIsStackRealigned(true); 1043 } else if ((HasFP = hasFP(MF))) { 1044 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg) 1045 .addReg(StackPtrReg) 1046 .setMIFlag(MachineInstr::FrameSetup); 1047 } 1048 1049 // If we need a base pointer, set it up here. It's whatever the value of 1050 // the stack pointer is at this point. Any variable size objects will be 1051 // allocated after this, so we can still use the base pointer to reference 1052 // the incoming arguments. 
1053 if ((HasBP = TRI.hasBasePointer(MF))) { 1054 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), BasePtrReg) 1055 .addReg(StackPtrReg) 1056 .setMIFlag(MachineInstr::FrameSetup); 1057 } 1058 1059 if (HasFP && RoundedSize != 0) { 1060 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), StackPtrReg) 1061 .addReg(StackPtrReg) 1062 .addImm(RoundedSize * getScratchScaleFactor(ST)) 1063 .setMIFlag(MachineInstr::FrameSetup); 1064 } 1065 1066 assert((!HasFP || (FuncInfo->SGPRForFPSaveRestoreCopy || 1067 FuncInfo->FramePointerSaveIndex)) && 1068 "Needed to save FP but didn't save it anywhere"); 1069 1070 assert((HasFP || (!FuncInfo->SGPRForFPSaveRestoreCopy && 1071 !FuncInfo->FramePointerSaveIndex)) && 1072 "Saved FP but didn't need it"); 1073 1074 assert((!HasBP || (FuncInfo->SGPRForBPSaveRestoreCopy || 1075 FuncInfo->BasePointerSaveIndex)) && 1076 "Needed to save BP but didn't save it anywhere"); 1077 1078 assert((HasBP || (!FuncInfo->SGPRForBPSaveRestoreCopy && 1079 !FuncInfo->BasePointerSaveIndex)) && 1080 "Saved BP but didn't need it"); 1081 } 1082 1083 void SIFrameLowering::emitEpilogue(MachineFunction &MF, 1084 MachineBasicBlock &MBB) const { 1085 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 1086 if (FuncInfo->isEntryFunction()) 1087 return; 1088 1089 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 1090 const SIInstrInfo *TII = ST.getInstrInfo(); 1091 MachineRegisterInfo &MRI = MF.getRegInfo(); 1092 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 1093 MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator(); 1094 LivePhysRegs LiveRegs; 1095 DebugLoc DL; 1096 1097 const MachineFrameInfo &MFI = MF.getFrameInfo(); 1098 uint32_t NumBytes = MFI.getStackSize(); 1099 uint32_t RoundedSize = FuncInfo->isStackRealigned() 1100 ? NumBytes + MFI.getMaxAlign().value() 1101 : NumBytes; 1102 const Register StackPtrReg = FuncInfo->getStackPtrOffsetReg(); 1103 const Register FramePtrReg = FuncInfo->getFrameOffsetReg(); 1104 const Register BasePtrReg = 1105 TRI.hasBasePointer(MF) ? 
TRI.getBaseRegister() : Register(); 1106 1107 bool HasFPSaveIndex = FuncInfo->FramePointerSaveIndex.hasValue(); 1108 bool SpillFPToMemory = false; 1109 if (HasFPSaveIndex) { 1110 SpillFPToMemory = MFI.getStackID(*FuncInfo->FramePointerSaveIndex) != 1111 TargetStackID::SGPRSpill; 1112 } 1113 1114 bool HasBPSaveIndex = FuncInfo->BasePointerSaveIndex.hasValue(); 1115 bool SpillBPToMemory = false; 1116 if (HasBPSaveIndex) { 1117 SpillBPToMemory = MFI.getStackID(*FuncInfo->BasePointerSaveIndex) != 1118 TargetStackID::SGPRSpill; 1119 } 1120 1121 if (RoundedSize != 0 && hasFP(MF)) { 1122 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_SUB_U32), StackPtrReg) 1123 .addReg(StackPtrReg) 1124 .addImm(RoundedSize * getScratchScaleFactor(ST)) 1125 .setMIFlag(MachineInstr::FrameDestroy); 1126 } 1127 1128 if (FuncInfo->SGPRForFPSaveRestoreCopy) { 1129 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg) 1130 .addReg(FuncInfo->SGPRForFPSaveRestoreCopy) 1131 .setMIFlag(MachineInstr::FrameSetup); 1132 } 1133 1134 if (FuncInfo->SGPRForBPSaveRestoreCopy) { 1135 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), BasePtrReg) 1136 .addReg(FuncInfo->SGPRForBPSaveRestoreCopy) 1137 .setMIFlag(MachineInstr::FrameSetup); 1138 } 1139 1140 Register ScratchExecCopy; 1141 if (HasFPSaveIndex) { 1142 const int FI = FuncInfo->FramePointerSaveIndex.getValue(); 1143 assert(!MFI.isDeadObjectIndex(FI)); 1144 if (SpillFPToMemory) { 1145 if (!ScratchExecCopy) 1146 ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, false); 1147 1148 MCPhysReg TempVGPR = findScratchNonCalleeSaveRegister( 1149 MRI, LiveRegs, AMDGPU::VGPR_32RegClass); 1150 buildEpilogReload(ST, LiveRegs, MBB, MBBI, TII, TempVGPR, 1151 FuncInfo->getScratchRSrcReg(), StackPtrReg, FI); 1152 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), FramePtrReg) 1153 .addReg(TempVGPR, RegState::Kill); 1154 } else { 1155 // Reload from VGPR spill. 1156 assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill); 1157 ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill = 1158 FuncInfo->getSGPRToVGPRSpills(FI); 1159 assert(Spill.size() == 1); 1160 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READLANE_B32), FramePtrReg) 1161 .addReg(Spill[0].VGPR) 1162 .addImm(Spill[0].Lane); 1163 } 1164 } 1165 1166 if (HasBPSaveIndex) { 1167 const int BasePtrFI = *FuncInfo->BasePointerSaveIndex; 1168 assert(!MFI.isDeadObjectIndex(BasePtrFI)); 1169 if (SpillBPToMemory) { 1170 if (!ScratchExecCopy) 1171 ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, false); 1172 1173 MCPhysReg TempVGPR = findScratchNonCalleeSaveRegister( 1174 MRI, LiveRegs, AMDGPU::VGPR_32RegClass); 1175 buildEpilogReload(ST, LiveRegs, MBB, MBBI, TII, TempVGPR, 1176 FuncInfo->getScratchRSrcReg(), StackPtrReg, BasePtrFI); 1177 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), BasePtrReg) 1178 .addReg(TempVGPR, RegState::Kill); 1179 } else { 1180 // Reload from VGPR spill. 
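      // i.e. roughly the inverse of the prologue's v_writelane:
      //   v_readlane_b32 bp, v_spill, lane   (registers illustrative)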
1181 assert(MFI.getStackID(BasePtrFI) == TargetStackID::SGPRSpill); 1182 ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill = 1183 FuncInfo->getSGPRToVGPRSpills(BasePtrFI); 1184 assert(Spill.size() == 1); 1185 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READLANE_B32), BasePtrReg) 1186 .addReg(Spill[0].VGPR) 1187 .addImm(Spill[0].Lane); 1188 } 1189 } 1190 1191 for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg : 1192 FuncInfo->getSGPRSpillVGPRs()) { 1193 if (!Reg.FI.hasValue()) 1194 continue; 1195 1196 if (!ScratchExecCopy) 1197 ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, false); 1198 1199 buildEpilogReload(ST, LiveRegs, MBB, MBBI, TII, Reg.VGPR, 1200 FuncInfo->getScratchRSrcReg(), StackPtrReg, 1201 Reg.FI.getValue()); 1202 } 1203 1204 if (ScratchExecCopy) { 1205 // FIXME: Split block and make terminator. 1206 unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 1207 MCRegister Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 1208 BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec) 1209 .addReg(ScratchExecCopy, RegState::Kill); 1210 } 1211 } 1212 1213 #ifndef NDEBUG 1214 static bool allSGPRSpillsAreDead(const MachineFunction &MF) { 1215 const MachineFrameInfo &MFI = MF.getFrameInfo(); 1216 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 1217 for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd(); 1218 I != E; ++I) { 1219 if (!MFI.isDeadObjectIndex(I) && 1220 MFI.getStackID(I) == TargetStackID::SGPRSpill && 1221 (I != FuncInfo->FramePointerSaveIndex && 1222 I != FuncInfo->BasePointerSaveIndex)) { 1223 return false; 1224 } 1225 } 1226 1227 return true; 1228 } 1229 #endif 1230 1231 StackOffset SIFrameLowering::getFrameIndexReference(const MachineFunction &MF, 1232 int FI, 1233 Register &FrameReg) const { 1234 const SIRegisterInfo *RI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo(); 1235 1236 FrameReg = RI->getFrameRegister(MF); 1237 return StackOffset::getFixed(MF.getFrameInfo().getObjectOffset(FI)); 1238 } 1239 1240 void SIFrameLowering::processFunctionBeforeFrameFinalized( 1241 MachineFunction &MF, 1242 RegScavenger *RS) const { 1243 MachineFrameInfo &MFI = MF.getFrameInfo(); 1244 1245 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 1246 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 1247 SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 1248 1249 FuncInfo->removeDeadFrameIndices(MFI); 1250 assert(allSGPRSpillsAreDead(MF) && 1251 "SGPR spill should have been removed in SILowerSGPRSpills"); 1252 1253 // FIXME: The other checks should be redundant with allStackObjectsAreDead, 1254 // but currently hasNonSpillStackObjects is set only from source 1255 // allocas. Stack temps produced from legalization are not counted currently. 1256 if (!allStackObjectsAreDead(MFI)) { 1257 assert(RS && "RegScavenger required if spilling"); 1258 1259 if (FuncInfo->isEntryFunction()) { 1260 int ScavengeFI = MFI.CreateFixedObject( 1261 TRI->getSpillSize(AMDGPU::SGPR_32RegClass), 0, false); 1262 RS->addScavengingFrameIndex(ScavengeFI); 1263 } else { 1264 int ScavengeFI = MFI.CreateStackObject( 1265 TRI->getSpillSize(AMDGPU::SGPR_32RegClass), 1266 TRI->getSpillAlign(AMDGPU::SGPR_32RegClass), false); 1267 RS->addScavengingFrameIndex(ScavengeFI); 1268 } 1269 } 1270 } 1271 1272 // Only report VGPRs to generic code. 
1273 void SIFrameLowering::determineCalleeSaves(MachineFunction &MF, 1274 BitVector &SavedVGPRs, 1275 RegScavenger *RS) const { 1276 TargetFrameLowering::determineCalleeSaves(MF, SavedVGPRs, RS); 1277 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1278 if (MFI->isEntryFunction()) 1279 return; 1280 1281 MachineFrameInfo &FrameInfo = MF.getFrameInfo(); 1282 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 1283 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 1284 1285 // Ignore the SGPRs the default implementation found. 1286 SavedVGPRs.clearBitsNotInMask(TRI->getAllVGPRRegMask()); 1287 1288 // hasFP only knows about stack objects that already exist. We're now 1289 // determining the stack slots that will be created, so we have to predict 1290 // them. Stack objects force FP usage with calls. 1291 // 1292 // Note a new VGPR CSR may be introduced if one is used for the spill, but we 1293 // don't want to report it here. 1294 // 1295 // FIXME: Is this really hasReservedCallFrame? 1296 const bool WillHaveFP = 1297 FrameInfo.hasCalls() && 1298 (SavedVGPRs.any() || !allStackObjectsAreDead(FrameInfo)); 1299 1300 // VGPRs used for SGPR spilling need to be specially inserted in the prolog, 1301 // so don't allow the default insertion to handle them. 1302 for (auto SSpill : MFI->getSGPRSpillVGPRs()) 1303 SavedVGPRs.reset(SSpill.VGPR); 1304 1305 LivePhysRegs LiveRegs; 1306 LiveRegs.init(*TRI); 1307 1308 if (WillHaveFP || hasFP(MF)) { 1309 getVGPRSpillLaneOrTempRegister(MF, LiveRegs, MFI->SGPRForFPSaveRestoreCopy, 1310 MFI->FramePointerSaveIndex, true); 1311 } 1312 1313 if (TRI->hasBasePointer(MF)) { 1314 if (MFI->SGPRForFPSaveRestoreCopy) 1315 LiveRegs.addReg(MFI->SGPRForFPSaveRestoreCopy); 1316 getVGPRSpillLaneOrTempRegister(MF, LiveRegs, MFI->SGPRForBPSaveRestoreCopy, 1317 MFI->BasePointerSaveIndex, false); 1318 } 1319 } 1320 1321 void SIFrameLowering::determineCalleeSavesSGPR(MachineFunction &MF, 1322 BitVector &SavedRegs, 1323 RegScavenger *RS) const { 1324 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); 1325 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1326 if (MFI->isEntryFunction()) 1327 return; 1328 1329 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 1330 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 1331 1332 // The SP is specifically managed and we don't want extra spills of it. 1333 SavedRegs.reset(MFI->getStackPtrOffsetReg()); 1334 SavedRegs.clearBitsInMask(TRI->getAllVGPRRegMask()); 1335 } 1336 1337 bool SIFrameLowering::assignCalleeSavedSpillSlots( 1338 MachineFunction &MF, const TargetRegisterInfo *TRI, 1339 std::vector<CalleeSavedInfo> &CSI) const { 1340 if (CSI.empty()) 1341 return true; // Early exit if no callee saved registers are modified! 
1342 1343 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 1344 if (!FuncInfo->SGPRForFPSaveRestoreCopy && 1345 !FuncInfo->SGPRForBPSaveRestoreCopy) 1346 return false; 1347 1348 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 1349 const SIRegisterInfo *RI = ST.getRegisterInfo(); 1350 Register FramePtrReg = FuncInfo->getFrameOffsetReg(); 1351 Register BasePtrReg = RI->getBaseRegister(); 1352 unsigned NumModifiedRegs = 0; 1353 1354 if (FuncInfo->SGPRForFPSaveRestoreCopy) 1355 NumModifiedRegs++; 1356 if (FuncInfo->SGPRForBPSaveRestoreCopy) 1357 NumModifiedRegs++; 1358 1359 for (auto &CS : CSI) { 1360 if (CS.getReg() == FramePtrReg && FuncInfo->SGPRForFPSaveRestoreCopy) { 1361 CS.setDstReg(FuncInfo->SGPRForFPSaveRestoreCopy); 1362 if (--NumModifiedRegs) 1363 break; 1364 } else if (CS.getReg() == BasePtrReg && 1365 FuncInfo->SGPRForBPSaveRestoreCopy) { 1366 CS.setDstReg(FuncInfo->SGPRForBPSaveRestoreCopy); 1367 if (--NumModifiedRegs) 1368 break; 1369 } 1370 } 1371 1372 return false; 1373 } 1374 1375 MachineBasicBlock::iterator SIFrameLowering::eliminateCallFramePseudoInstr( 1376 MachineFunction &MF, 1377 MachineBasicBlock &MBB, 1378 MachineBasicBlock::iterator I) const { 1379 int64_t Amount = I->getOperand(0).getImm(); 1380 if (Amount == 0) 1381 return MBB.erase(I); 1382 1383 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 1384 const SIInstrInfo *TII = ST.getInstrInfo(); 1385 const DebugLoc &DL = I->getDebugLoc(); 1386 unsigned Opc = I->getOpcode(); 1387 bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode(); 1388 uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0; 1389 1390 if (!hasReservedCallFrame(MF)) { 1391 Amount = alignTo(Amount, getStackAlign()); 1392 assert(isUInt<32>(Amount) && "exceeded stack address space size"); 1393 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1394 Register SPReg = MFI->getStackPtrOffsetReg(); 1395 1396 unsigned Op = IsDestroy ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32; 1397 BuildMI(MBB, I, DL, TII->get(Op), SPReg) 1398 .addReg(SPReg) 1399 .addImm(Amount * getScratchScaleFactor(ST)); 1400 } else if (CalleePopAmount != 0) { 1401 llvm_unreachable("is this used?"); 1402 } 1403 1404 return MBB.erase(I); 1405 } 1406 1407 /// Returns true if the frame will require a reference to the stack pointer. 1408 /// 1409 /// This is the set of conditions common to setting up the stack pointer in a 1410 /// kernel, and for using a frame pointer in a callable function. 1411 /// 1412 /// FIXME: Should also check hasOpaqueSPAdjustment and if any inline asm 1413 /// references SP. 1414 static bool frameTriviallyRequiresSP(const MachineFrameInfo &MFI) { 1415 return MFI.hasVarSizedObjects() || MFI.hasStackMap() || MFI.hasPatchPoint(); 1416 } 1417 1418 // The FP for kernels is always known 0, so we never really need to setup an 1419 // explicit register for it. However, DisableFramePointerElim will force us to 1420 // use a register for it. 1421 bool SIFrameLowering::hasFP(const MachineFunction &MF) const { 1422 const MachineFrameInfo &MFI = MF.getFrameInfo(); 1423 1424 // For entry functions we can use an immediate offset in most cases, so the 1425 // presence of calls doesn't imply we need a distinct frame pointer. 1426 if (MFI.hasCalls() && 1427 !MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) { 1428 // All offsets are unsigned, so need to be addressed in the same direction 1429 // as stack growth. 
1430 1431 // FIXME: This function is pretty broken, since it can be called before the 1432 // frame layout is determined or CSR spills are inserted. 1433 return MFI.getStackSize() != 0; 1434 } 1435 1436 return frameTriviallyRequiresSP(MFI) || MFI.isFrameAddressTaken() || 1437 MF.getSubtarget<GCNSubtarget>().getRegisterInfo()->needsStackRealignment(MF) || 1438 MF.getTarget().Options.DisableFramePointerElim(MF); 1439 } 1440 1441 // This is essentially a reduced version of hasFP for entry functions. Since the 1442 // stack pointer is known 0 on entry to kernels, we never really need an FP 1443 // register. We may need to initialize the stack pointer depending on the frame 1444 // properties, which logically overlaps many of the cases where an ordinary 1445 // function would require an FP. 1446 bool SIFrameLowering::requiresStackPointerReference( 1447 const MachineFunction &MF) const { 1448 // Callable functions always require a stack pointer reference. 1449 assert(MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction() && 1450 "only expected to call this for entry points"); 1451 1452 const MachineFrameInfo &MFI = MF.getFrameInfo(); 1453 1454 // Entry points ordinarily don't need to initialize SP. We have to set it up 1455 // for callees if there are any. Also note tail calls are impossible/don't 1456 // make any sense for kernels. 1457 if (MFI.hasCalls()) 1458 return true; 1459 1460 // We still need to initialize the SP if we're doing anything weird that 1461 // references the SP, like variable sized stack objects. 1462 return frameTriviallyRequiresSP(MFI); 1463 } 1464