//===----------------------- SIFrameLowering.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "frame-info"

// Find a scratch register that we can use in the prologue. We avoid using
// callee-save registers since they may appear to be free when this is called
// from canUseAsPrologue (during shrink wrapping), but then no longer be free
// when this is called from emitPrologue.
static MCRegister findScratchNonCalleeSaveRegister(MachineRegisterInfo &MRI,
                                                   LivePhysRegs &LiveRegs,
                                                   const TargetRegisterClass &RC,
                                                   bool Unused = false) {
  // Mark callee saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

  if (Unused) {
    // We are looking for a register that can be used throughout the entire
    // function, so any use is unacceptable.
    for (MCRegister Reg : RC) {
      if (!MRI.isPhysRegUsed(Reg) && LiveRegs.available(MRI, Reg))
        return Reg;
    }
  } else {
    for (MCRegister Reg : RC) {
      if (LiveRegs.available(MRI, Reg))
        return Reg;
    }
  }

  return MCRegister();
}

static void getVGPRSpillLaneOrTempRegister(MachineFunction &MF,
                                           LivePhysRegs &LiveRegs,
                                           Register &TempSGPR,
                                           Optional<int> &FrameIndex,
                                           bool IsFP) {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  // We need to save and restore the current FP/BP.

  // 1: If there is already a VGPR with free lanes, use it. We
  // may already have to pay the penalty for spilling a CSR VGPR.
  if (MFI->haveFreeLanesForSGPRSpill(MF, 1)) {
    int NewFI = FrameInfo.CreateStackObject(4, Align(4), true, nullptr,
                                            TargetStackID::SGPRSpill);

    if (!MFI->allocateSGPRSpillToVGPR(MF, NewFI))
      llvm_unreachable("allocate SGPR spill should have worked");

    FrameIndex = NewFI;

    LLVM_DEBUG(auto Spill = MFI->getSGPRToVGPRSpills(NewFI).front();
               dbgs() << "Spilling " << (IsFP ? "FP" : "BP") << " to "
                      << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane
                      << '\n');
    return;
  }

  // 2: Next, try to save the FP/BP in an unused SGPR.
  TempSGPR = findScratchNonCalleeSaveRegister(
      MF.getRegInfo(), LiveRegs, AMDGPU::SReg_32_XM0_XEXECRegClass, true);

  if (!TempSGPR) {
    int NewFI = FrameInfo.CreateStackObject(4, Align(4), true, nullptr,
                                            TargetStackID::SGPRSpill);

    if (TRI->spillSGPRToVGPR() && MFI->allocateSGPRSpillToVGPR(MF, NewFI)) {
      // 3: There's no free lane to spill, and no free register to save FP/BP,
      // so we're forced to spill another VGPR to use for the spill.
      FrameIndex = NewFI;

      LLVM_DEBUG(
          auto Spill = MFI->getSGPRToVGPRSpills(NewFI).front();
          dbgs() << (IsFP ? "FP" : "BP") << " requires fallback spill to "
                 << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane << '\n';);
    } else {
      // Remove dead <NewFI> index
      MF.getFrameInfo().RemoveStackObject(NewFI);
      // 4: If all else fails, spill the FP/BP to memory.
      FrameIndex = FrameInfo.CreateSpillStackObject(4, Align(4));
      LLVM_DEBUG(dbgs() << "Reserved FI " << FrameIndex << " for spilling "
                        << (IsFP ? "FP" : "BP") << '\n');
    }
  } else {
    LLVM_DEBUG(dbgs() << "Saving " << (IsFP ? "FP" : "BP") << " with copy to "
                      << printReg(TempSGPR, TRI) << '\n');
  }
}

// We need to emit the stack operations here specially because the frame
// register used here differs from the one getFrameRegister returns for the
// rest of the function.
static void buildPrologSpill(const GCNSubtarget &ST, LivePhysRegs &LiveRegs,
                             MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I,
                             const SIInstrInfo *TII, Register SpillReg,
                             Register ScratchRsrcReg, Register SPReg, int FI) {
  MachineFunction *MF = MBB.getParent();
  MachineFrameInfo &MFI = MF->getFrameInfo();

  int64_t Offset = MFI.getObjectOffset(FI);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore, 4,
      MFI.getObjectAlign(FI));

  if (ST.enableFlatScratch()) {
    if (TII->isLegalFLATOffset(Offset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::SCRATCH_STORE_DWORD_SADDR))
          .addReg(SpillReg, RegState::Kill)
          .addReg(SPReg)
          .addImm(Offset)
          .addImm(0) // glc
          .addImm(0) // slc
          .addImm(0) // dlc
          .addMemOperand(MMO);
      return;
    }
  } else if (SIInstrInfo::isLegalMUBUFImmOffset(Offset)) {
    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::BUFFER_STORE_DWORD_OFFSET))
        .addReg(SpillReg, RegState::Kill)
        .addReg(ScratchRsrcReg)
        .addReg(SPReg)
        .addImm(Offset)
        .addImm(0) // glc
        .addImm(0) // slc
        .addImm(0) // tfe
        .addImm(0) // dlc
        .addImm(0) // swz
        .addMemOperand(MMO);
    return;
  }

  // Don't clobber the TmpVGPR if we also need a scratch reg for the stack
  // offset in the spill.
  LiveRegs.addReg(SpillReg);

  if (ST.enableFlatScratch()) {
    MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister(
        MF->getRegInfo(), LiveRegs, AMDGPU::SReg_32_XM0RegClass);

    bool HasOffsetReg = OffsetReg;
    if (!HasOffsetReg) {
      // No free register, use stack pointer and restore afterwards.
      OffsetReg = SPReg;
    }

    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_ADD_U32), OffsetReg)
        .addReg(SPReg)
        .addImm(Offset);

    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::SCRATCH_STORE_DWORD_SADDR))
        .addReg(SpillReg, RegState::Kill)
        .addReg(OffsetReg, HasOffsetReg ? RegState::Kill : 0)
        .addImm(0) // offset
        .addImm(0) // glc
        .addImm(0) // slc
        .addImm(0) // dlc
        .addMemOperand(MMO);

    if (!HasOffsetReg) {
      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_SUB_U32), OffsetReg)
          .addReg(SPReg)
          .addImm(Offset);
    }
  } else {
    MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister(
        MF->getRegInfo(), LiveRegs, AMDGPU::VGPR_32RegClass);

    if (OffsetReg) {
      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::V_MOV_B32_e32), OffsetReg)
          .addImm(Offset);

      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::BUFFER_STORE_DWORD_OFFEN))
          .addReg(SpillReg, RegState::Kill)
          .addReg(OffsetReg, RegState::Kill)
          .addReg(ScratchRsrcReg)
          .addReg(SPReg)
          .addImm(0) // offset
          .addImm(0) // glc
          .addImm(0) // slc
          .addImm(0) // tfe
          .addImm(0) // dlc
          .addImm(0) // swz
          .addMemOperand(MMO);
    } else {
      // No free register, use stack pointer and restore afterwards.
      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_ADD_U32), SPReg)
          .addReg(SPReg)
          .addImm(Offset);

      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::BUFFER_STORE_DWORD_OFFSET))
          .addReg(SpillReg, RegState::Kill)
          .addReg(ScratchRsrcReg)
          .addReg(SPReg)
          .addImm(0) // offset
          .addImm(0) // glc
          .addImm(0) // slc
          .addImm(0) // tfe
          .addImm(0) // dlc
          .addImm(0) // swz
          .addMemOperand(MMO);

      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_SUB_U32), SPReg)
          .addReg(SPReg)
          .addImm(Offset);
    }
  }

  LiveRegs.removeReg(SpillReg);
}

static void buildEpilogReload(const GCNSubtarget &ST, LivePhysRegs &LiveRegs,
                              MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I,
                              const SIInstrInfo *TII, Register SpillReg,
                              Register ScratchRsrcReg, Register SPReg, int FI) {
  MachineFunction *MF = MBB.getParent();
  MachineFrameInfo &MFI = MF->getFrameInfo();
  int64_t Offset = MFI.getObjectOffset(FI);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad, 4,
      MFI.getObjectAlign(FI));

  if (ST.enableFlatScratch()) {
    if (TII->isLegalFLATOffset(Offset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
      BuildMI(MBB, I, DebugLoc(),
              TII->get(AMDGPU::SCRATCH_LOAD_DWORD_SADDR), SpillReg)
          .addReg(SPReg)
          .addImm(Offset)
          .addImm(0) // glc
          .addImm(0) // slc
          .addImm(0) // dlc
          .addMemOperand(MMO);
      return;
    }
    MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister(
        MF->getRegInfo(), LiveRegs, AMDGPU::SReg_32_XM0RegClass);
    if (!OffsetReg)
      report_fatal_error("failed to find free scratch register");

    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_ADD_U32), OffsetReg)
        .addReg(SPReg)
        .addImm(Offset);
    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::SCRATCH_LOAD_DWORD_SADDR),
            SpillReg)
        .addReg(OffsetReg, RegState::Kill)
        .addImm(0)
        .addImm(0) // glc
        .addImm(0) // slc
        .addImm(0) // dlc
        .addMemOperand(MMO);
    return;
  }

  if (SIInstrInfo::isLegalMUBUFImmOffset(Offset)) {
    BuildMI(MBB, I, DebugLoc(),
            TII->get(AMDGPU::BUFFER_LOAD_DWORD_OFFSET), SpillReg)
        .addReg(ScratchRsrcReg)
        .addReg(SPReg)
        .addImm(Offset)
        .addImm(0) // glc
        .addImm(0) // slc
        .addImm(0) // tfe
        .addImm(0) // dlc
        .addImm(0) // swz
        .addMemOperand(MMO);
    return;
  }

  MCPhysReg OffsetReg = findScratchNonCalleeSaveRegister(
      MF->getRegInfo(), LiveRegs, AMDGPU::VGPR_32RegClass);
  if (!OffsetReg)
    report_fatal_error("failed to find free scratch register");

  BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::V_MOV_B32_e32), OffsetReg)
      .addImm(Offset);

  BuildMI(MBB, I, DebugLoc(),
          TII->get(AMDGPU::BUFFER_LOAD_DWORD_OFFEN), SpillReg)
      .addReg(OffsetReg, RegState::Kill)
      .addReg(ScratchRsrcReg)
      .addReg(SPReg)
      .addImm(0)
      .addImm(0) // glc
      .addImm(0) // slc
      .addImm(0) // tfe
      .addImm(0) // dlc
      .addImm(0) // swz
      .addMemOperand(MMO);
}

static void buildGitPtr(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, const SIInstrInfo *TII,
                        Register TargetReg) {
  MachineFunction *MF = MBB.getParent();
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);
  Register TargetLo = TRI->getSubReg(TargetReg, AMDGPU::sub0);
  Register TargetHi = TRI->getSubReg(TargetReg, AMDGPU::sub1);

  if (MFI->getGITPtrHigh() != 0xffffffff) {
    BuildMI(MBB, I, DL, SMovB32, TargetHi)
        .addImm(MFI->getGITPtrHigh())
        .addReg(TargetReg, RegState::ImplicitDefine);
  } else {
    const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64);
    BuildMI(MBB, I, DL, GetPC64, TargetReg);
  }
  Register GitPtrLo = MFI->getGITPtrLoReg(*MF);
  MF->getRegInfo().addLiveIn(GitPtrLo);
  MBB.addLiveIn(GitPtrLo);
  BuildMI(MBB, I, DL, SMovB32, TargetLo)
      .addReg(GitPtrLo);
}

// Emit flat scratch setup code, assuming `MFI->hasFlatScratchInit()`
void SIFrameLowering::emitEntryFunctionFlatScratchInit(
    MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    const DebugLoc &DL, Register ScratchWaveOffsetReg) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // We don't need this if we only have spills since there is no user facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.
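
  // A rough sketch of the two layouts set up below (with {init_lo, init_hi}
  // standing in for the preloaded flat scratch init pair):
  //  * flat scratch as a pointer (GFX9+):
  //      flat_scratch = {init_hi, init_lo} + scratch_wave_offset
  //  * older targets: FLAT_SCR_LO holds the size in bytes and FLAT_SCR_HI the
  //    private base offset plus wave offset, in 256-byte units.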

  Register FlatScrInitLo;
  Register FlatScrInitHi;

  if (ST.isAmdPalOS()) {
    // Extract the scratch offset from the descriptor in the GIT
    LivePhysRegs LiveRegs;
    LiveRegs.init(*TRI);
    LiveRegs.addLiveIns(MBB);

    // Find unused reg to load flat scratch init into
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register FlatScrInit = AMDGPU::NoRegister;
    ArrayRef<MCPhysReg> AllSGPR64s = TRI->getAllSGPR64(MF);
    unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 1) / 2;
    AllSGPR64s = AllSGPR64s.slice(
        std::min(static_cast<unsigned>(AllSGPR64s.size()), NumPreloaded));
    Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
    for (MCPhysReg Reg : AllSGPR64s) {
      if (LiveRegs.available(MRI, Reg) && MRI.isAllocatable(Reg) &&
          !TRI->isSubRegisterEq(Reg, GITPtrLoReg)) {
        FlatScrInit = Reg;
        break;
      }
    }
    assert(FlatScrInit && "Failed to find free register for scratch init");

    FlatScrInitLo = TRI->getSubReg(FlatScrInit, AMDGPU::sub0);
    FlatScrInitHi = TRI->getSubReg(FlatScrInit, AMDGPU::sub1);

    buildGitPtr(MBB, I, DL, TII, FlatScrInit);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
    const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);
    auto *MMO = MF.getMachineMemOperand(
        PtrInfo,
        MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
            MachineMemOperand::MODereferenceable,
        8, Align(4));
    unsigned Offset =
        MF.getFunction().getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
    unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset);
    BuildMI(MBB, I, DL, LoadDwordX2, FlatScrInit)
        .addReg(FlatScrInit)
        .addImm(EncodedOffset) // offset
        .addImm(0)             // glc
        .addImm(0)             // dlc
        .addMemOperand(MMO);

    // Mask the offset in [47:0] of the descriptor
    const MCInstrDesc &SAndB32 = TII->get(AMDGPU::S_AND_B32);
    BuildMI(MBB, I, DL, SAndB32, FlatScrInitHi)
        .addReg(FlatScrInitHi)
        .addImm(0xffff);
  } else {
    Register FlatScratchInitReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);
    assert(FlatScratchInitReg);

    MachineRegisterInfo &MRI = MF.getRegInfo();
    MRI.addLiveIn(FlatScratchInitReg);
    MBB.addLiveIn(FlatScratchInitReg);

    FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
    FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);
  }

  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
          .addReg(FlatScrInitLo)
          .addReg(ScratchWaveOffsetReg);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), FlatScrInitHi)
          .addReg(FlatScrInitHi)
          .addImm(0);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32)).
        addReg(FlatScrInitLo).
        addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_LO |
                       (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32)).
        addReg(FlatScrInitHi).
        addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_HI |
                       (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
      return;
    }

    // For GFX9.
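    // The sequence emitted here is roughly (s_off standing in for the
    // scratch wave offset register):
    //   s_add_u32  flat_scratch_lo, <init_lo>, s_off
    //   s_addc_u32 flat_scratch_hi, <init_hi>, 0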
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
        .addReg(FlatScrInitLo)
        .addReg(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
        .addReg(FlatScrInitHi)
        .addImm(0);

    return;
  }

  assert(ST.getGeneration() < AMDGPUSubtarget::GFX9);

  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitHi, RegState::Kill);

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
      .addReg(FlatScrInitLo, RegState::Kill)
      .addImm(8);
}

// Note SGPRSpill stack IDs should only be used for SGPR spilling to VGPRs, not
// memory. They should have been removed by now.
static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I))
      return false;
  }

  return true;
}

// Shift down registers reserved for the scratch RSRC.
Register SIFrameLowering::getEntryFunctionReservedScratchRsrcReg(
    MachineFunction &MF) const {

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  assert(MFI->isEntryFunction());

  Register ScratchRsrcReg = MFI->getScratchRSrcReg();

  if (!ScratchRsrcReg || (!MRI.isPhysRegUsed(ScratchRsrcReg) &&
                          allStackObjectsAreDead(MF.getFrameInfo())))
    return Register();

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift it down to the end of those
  // which were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. We only
  // cannot do this for the resources required for scratch access. For now we
  // skip over user SGPRs and may leave unused holes.

  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = TRI->getAllSGPR128(MF);
  AllSGPR128s = AllSGPR128s.slice(
      std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed. Also for PAL, make sure we don't clobber
    // the GIT pointer passed in SGPR0 or SGPR8.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) &&
        !TRI->isSubRegisterEq(Reg, GITPtrLoReg)) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

static unsigned getScratchScaleFactor(const GCNSubtarget &ST) {
  return ST.enableFlatScratch() ? 1 : ST.getWavefrontSize();
}

void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
                                                MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  // FIXME: If we only have SGPR spills, we won't actually be using scratch
  // memory since these spill to VGPRs. We should be cleaning up these unused
  // SGPR spill frame indices somewhere.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.

  // FIXME: Remove all of the isPhysRegUsed checks

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();

  assert(MFI->isEntryFunction());

  Register PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
      AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
  // FIXME: Hack to not crash in situations which emitted an error.
  if (!PreloadedScratchWaveOffsetReg)
    return;

  // We need to do the replacement of the private segment buffer register even
  // if there are no stack objects. There could be stores to undef or a
  // constant without an associated object.
  //
  // This will return `Register()` in cases where there are no actual
  // uses of the SRSRC.
  Register ScratchRsrcReg;
  if (!ST.enableFlatScratch())
    ScratchRsrcReg = getEntryFunctionReservedScratchRsrcReg(MF);

  // Make the selected register live throughout the function.
  if (ScratchRsrcReg) {
    for (MachineBasicBlock &OtherBB : MF) {
      if (&OtherBB != &MBB) {
        OtherBB.addLiveIn(ScratchRsrcReg);
      }
    }
  }

  // Now that we have fixed the reserved SRSRC we need to locate the
  // (potentially) preloaded SRSRC.
  Register PreloadedScratchRsrcReg;
  if (ST.isAmdHsaOrMesa(F)) {
    PreloadedScratchRsrcReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
    if (ScratchRsrcReg && PreloadedScratchRsrcReg) {
      // We added live-ins during argument lowering, but since they were not
      // used they were deleted. We're adding the uses now, so add them back.
      MRI.addLiveIn(PreloadedScratchRsrcReg);
      MBB.addLiveIn(PreloadedScratchRsrcReg);
    }
  }

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // We found the SRSRC first because it needs four registers and has an
  // alignment requirement. If the SRSRC that we found clobbers the scratch
  // wave offset, which may be in a fixed SGPR or a free SGPR chosen by
  // SITargetLowering::allocateSystemSGPRs, COPY the scratch wave offset to
  // a free SGPR.
  Register ScratchWaveOffsetReg;
  if (TRI->isSubRegisterEq(ScratchRsrcReg, PreloadedScratchWaveOffsetReg)) {
    ArrayRef<MCPhysReg> AllSGPRs = TRI->getAllSGPR32(MF);
    unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();
    AllSGPRs = AllSGPRs.slice(
        std::min(static_cast<unsigned>(AllSGPRs.size()), NumPreloaded));
    Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
    for (MCPhysReg Reg : AllSGPRs) {
      if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) &&
          !TRI->isSubRegisterEq(ScratchRsrcReg, Reg) && GITPtrLoReg != Reg) {
        ScratchWaveOffsetReg = Reg;
        BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
            .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill);
        break;
      }
    }
  } else {
    ScratchWaveOffsetReg = PreloadedScratchWaveOffsetReg;
  }
  assert(ScratchWaveOffsetReg);

  if (requiresStackPointerReference(MF)) {
    Register SPReg = MFI->getStackPtrOffsetReg();
    assert(SPReg != AMDGPU::SP_REG);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), SPReg)
        .addImm(MF.getFrameInfo().getStackSize() * getScratchScaleFactor(ST));
  }

  if (hasFP(MF)) {
    Register FPReg = MFI->getFrameOffsetReg();
    assert(FPReg != AMDGPU::FP_REG);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), FPReg).addImm(0);
  }

  if (MFI->hasFlatScratchInit() || ScratchRsrcReg) {
    MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
    MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
  }

  if (MFI->hasFlatScratchInit()) {
    emitEntryFunctionFlatScratchInit(MF, MBB, I, DL, ScratchWaveOffsetReg);
  }

  if (ScratchRsrcReg) {
    emitEntryFunctionScratchRsrcRegSetup(MF, MBB, I, DL,
                                         PreloadedScratchRsrcReg,
                                         ScratchRsrcReg, ScratchWaveOffsetReg);
  }
}

// Emit scratch RSRC setup code, assuming `ScratchRsrcReg != AMDGPU::NoReg`
void SIFrameLowering::emitEntryFunctionScratchRsrcRegSetup(
    MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    const DebugLoc &DL, Register PreloadedScratchRsrcReg,
    Register ScratchRsrcReg, Register ScratchWaveOffsetReg) const {

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const Function &Fn = MF.getFunction();

  if (ST.isAmdPalOS()) {
    // The pointer to the GIT is formed from the offset passed in and either
    // the amdgpu-git-ptr-high function attribute or the top part of the PC
    Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

    buildGitPtr(MBB, I, DL, TII, Rsrc01);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
    const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
    auto MMO = MF.getMachineMemOperand(PtrInfo,
                                       MachineMemOperand::MOLoad |
                                           MachineMemOperand::MOInvariant |
                                           MachineMemOperand::MODereferenceable,
                                       16, Align(4));
    unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
    unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset);
    BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg)
        .addReg(Rsrc01)
        .addImm(EncodedOffset) // offset
        .addImm(0)             // glc
        .addImm(0)             // dlc
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine)
        .addMemOperand(MMO);
  } else if (ST.isMesaGfxShader(Fn) || !PreloadedScratchRsrcReg) {
    assert(!ST.isAmdHsaOrMesa(Fn));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    Register Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    Register Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and setup the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();

    if (MFI->hasImplicitBufferPtr()) {
      Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
            .addReg(MFI->getImplicitBufferPtrUserSGPR())
            .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
        auto MMO = MF.getMachineMemOperand(
            PtrInfo,
            MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
                MachineMemOperand::MODereferenceable,
            8, Align(4));
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
            .addReg(MFI->getImplicitBufferPtrUserSGPR())
            .addImm(0) // offset
            .addImm(0) // glc
            .addImm(0) // dlc
            .addMemOperand(MMO)
            .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

        MF.getRegInfo().addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
        MBB.addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
      }
    } else {
      Register Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      Register Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
          .addExternalSymbol("SCRATCH_RSRC_DWORD0")
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
          .addExternalSymbol("SCRATCH_RSRC_DWORD1")
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
        .addImm(Rsrc23 & 0xffffffff)
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
        .addImm(Rsrc23 >> 32)
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  } else if (ST.isAmdHsaOrMesa(Fn)) {
    assert(PreloadedScratchRsrcReg);

    if (ScratchRsrcReg != PreloadedScratchRsrcReg) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
          .addReg(PreloadedScratchRsrcReg, RegState::Kill);
    }
  }

  // Add the scratch wave offset into the scratch RSRC.
  //
  // We only want to update the first 48 bits, which is the base address
  // pointer, without touching the adjacent 16 bits of flags. We know this add
  // cannot carry-out from bit 47, otherwise the scratch allocation would be
  // impossible to fit in the 48-bit global address space.
  //
  // TODO: Evaluate if it is better to just construct an SRD using the flat
  // scratch init and some constants rather than update the one we are passed.
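  //
  // The update emitted below is roughly (sub0/sub1 being the low dwords of
  // the resource descriptor):
  //   s_add_u32  srsrc.sub0, srsrc.sub0, <scratch_wave_offset>
  //   s_addc_u32 srsrc.sub1, srsrc.sub1, 0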
  Register ScratchRsrcSub0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
  Register ScratchRsrcSub1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

  // We cannot Kill ScratchWaveOffsetReg here because we allow it to be used in
  // the kernel body via inreg arguments.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), ScratchRsrcSub0)
      .addReg(ScratchRsrcSub0)
      .addReg(ScratchWaveOffsetReg)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), ScratchRsrcSub1)
      .addReg(ScratchRsrcSub1)
      .addImm(0)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
}

bool SIFrameLowering::isSupportedStackID(TargetStackID::Value ID) const {
  switch (ID) {
  case TargetStackID::Default:
  case TargetStackID::NoAlloc:
  case TargetStackID::SGPRSpill:
    return true;
  case TargetStackID::ScalableVector:
    return false;
  }
  llvm_unreachable("Invalid TargetStackID::Value");
}

// Activate all lanes, returns saved exec.
static Register buildScratchExecCopy(LivePhysRegs &LiveRegs,
                                     MachineFunction &MF,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     bool IsProlog) {
  Register ScratchExecCopy;
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  DebugLoc DL;

  if (LiveRegs.empty()) {
    if (IsProlog) {
      LiveRegs.init(TRI);
      LiveRegs.addLiveIns(MBB);
      if (FuncInfo->SGPRForFPSaveRestoreCopy)
        LiveRegs.removeReg(FuncInfo->SGPRForFPSaveRestoreCopy);

      if (FuncInfo->SGPRForBPSaveRestoreCopy)
        LiveRegs.removeReg(FuncInfo->SGPRForBPSaveRestoreCopy);
    } else {
      // In epilog.
      LiveRegs.init(*ST.getRegisterInfo());
      LiveRegs.addLiveOuts(MBB);
      LiveRegs.stepBackward(*MBBI);
    }
  }

  ScratchExecCopy = findScratchNonCalleeSaveRegister(
      MRI, LiveRegs, *TRI.getWaveMaskRegClass());
  if (!ScratchExecCopy)
    report_fatal_error("failed to find free scratch register");

  if (!IsProlog)
    LiveRegs.removeReg(ScratchExecCopy);

  const unsigned OrSaveExec =
      ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64;
  BuildMI(MBB, MBBI, DL, TII->get(OrSaveExec), ScratchExecCopy).addImm(-1);

  return ScratchExecCopy;
}

void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction()) {
    emitEntryFunctionPrologue(MF, MBB);
    return;
  }

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  Register StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  Register FramePtrReg = FuncInfo->getFrameOffsetReg();
  Register BasePtrReg =
      TRI.hasBasePointer(MF) ? TRI.getBaseRegister() : Register();
  LivePhysRegs LiveRegs;

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL;

  bool HasFP = false;
  bool HasBP = false;
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = NumBytes;
  // To avoid clobbering VGPRs in lanes that weren't active on function entry,
  // turn on all lanes before doing the spill to memory.
  Register ScratchExecCopy;

  bool HasFPSaveIndex = FuncInfo->FramePointerSaveIndex.hasValue();
  bool SpillFPToMemory = false;
  // A StackID of SGPRSpill implies that this is a spill from SGPR to VGPR.
  // Otherwise we are spilling the FP to memory.
  if (HasFPSaveIndex) {
    SpillFPToMemory = MFI.getStackID(*FuncInfo->FramePointerSaveIndex) !=
                      TargetStackID::SGPRSpill;
  }

  bool HasBPSaveIndex = FuncInfo->BasePointerSaveIndex.hasValue();
  bool SpillBPToMemory = false;
  // A StackID of SGPRSpill implies that this is a spill from SGPR to VGPR.
  // Otherwise we are spilling the BP to memory.
  if (HasBPSaveIndex) {
    SpillBPToMemory = MFI.getStackID(*FuncInfo->BasePointerSaveIndex) !=
                      TargetStackID::SGPRSpill;
  }

  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;

    if (!ScratchExecCopy)
      ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, true);

    buildPrologSpill(ST, LiveRegs, MBB, MBBI, TII, Reg.VGPR,
                     FuncInfo->getScratchRSrcReg(),
                     StackPtrReg,
                     Reg.FI.getValue());
  }

  if (HasFPSaveIndex && SpillFPToMemory) {
    assert(!MFI.isDeadObjectIndex(FuncInfo->FramePointerSaveIndex.getValue()));

    if (!ScratchExecCopy)
      ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, true);

    MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister(
        MRI, LiveRegs, AMDGPU::VGPR_32RegClass);
    if (!TmpVGPR)
      report_fatal_error("failed to find free scratch register");

    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
        .addReg(FramePtrReg);

    buildPrologSpill(ST, LiveRegs, MBB, MBBI, TII, TmpVGPR,
                     FuncInfo->getScratchRSrcReg(), StackPtrReg,
                     FuncInfo->FramePointerSaveIndex.getValue());
  }

  if (HasBPSaveIndex && SpillBPToMemory) {
    assert(!MFI.isDeadObjectIndex(*FuncInfo->BasePointerSaveIndex));

    if (!ScratchExecCopy)
      ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, true);

    MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister(
        MRI, LiveRegs, AMDGPU::VGPR_32RegClass);
    if (!TmpVGPR)
      report_fatal_error("failed to find free scratch register");

    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
        .addReg(BasePtrReg);

    buildPrologSpill(ST, LiveRegs, MBB, MBBI, TII, TmpVGPR,
                     FuncInfo->getScratchRSrcReg(), StackPtrReg,
                     *FuncInfo->BasePointerSaveIndex);
  }

  if (ScratchExecCopy) {
    // FIXME: Split block and make terminator.
    unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    MCRegister Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec)
        .addReg(ScratchExecCopy, RegState::Kill);
    LiveRegs.addReg(ScratchExecCopy);
  }

  // In this case, spill the FP to a reserved VGPR.
  if (HasFPSaveIndex && !SpillFPToMemory) {
    const int FI = FuncInfo->FramePointerSaveIndex.getValue();
    assert(!MFI.isDeadObjectIndex(FI));

    assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill);
    ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill =
        FuncInfo->getSGPRToVGPRSpills(FI);
    assert(Spill.size() == 1);

    // Save FP before setting it up.
    // FIXME: This should respect spillSGPRToVGPR;
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill[0].VGPR)
        .addReg(FramePtrReg)
        .addImm(Spill[0].Lane)
        .addReg(Spill[0].VGPR, RegState::Undef);
  }

  // In this case, spill the BP to a reserved VGPR.
  if (HasBPSaveIndex && !SpillBPToMemory) {
    const int BasePtrFI = *FuncInfo->BasePointerSaveIndex;
    assert(!MFI.isDeadObjectIndex(BasePtrFI));

    assert(MFI.getStackID(BasePtrFI) == TargetStackID::SGPRSpill);
    ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill =
        FuncInfo->getSGPRToVGPRSpills(BasePtrFI);
    assert(Spill.size() == 1);

    // Save BP before setting it up.
    // FIXME: This should respect spillSGPRToVGPR;
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill[0].VGPR)
        .addReg(BasePtrReg)
        .addImm(Spill[0].Lane)
        .addReg(Spill[0].VGPR, RegState::Undef);
  }

  // Emit the copy if we need an FP, and are using a free SGPR to save it.
  if (FuncInfo->SGPRForFPSaveRestoreCopy) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY),
            FuncInfo->SGPRForFPSaveRestoreCopy)
        .addReg(FramePtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Emit the copy if we need a BP, and are using a free SGPR to save it.
  if (FuncInfo->SGPRForBPSaveRestoreCopy) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY),
            FuncInfo->SGPRForBPSaveRestoreCopy)
        .addReg(BasePtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // If a copy has been emitted for FP and/or BP, make the SGPRs
  // used in the copy instructions live throughout the function.
  SmallVector<MCPhysReg, 2> TempSGPRs;
  if (FuncInfo->SGPRForFPSaveRestoreCopy)
    TempSGPRs.push_back(FuncInfo->SGPRForFPSaveRestoreCopy);

  if (FuncInfo->SGPRForBPSaveRestoreCopy)
    TempSGPRs.push_back(FuncInfo->SGPRForBPSaveRestoreCopy);

  if (!TempSGPRs.empty()) {
    for (MachineBasicBlock &MBB : MF) {
      for (MCPhysReg Reg : TempSGPRs)
        MBB.addLiveIn(Reg);

      MBB.sortUniqueLiveIns();
    }
    if (!LiveRegs.empty()) {
      LiveRegs.addReg(FuncInfo->SGPRForFPSaveRestoreCopy);
      LiveRegs.addReg(FuncInfo->SGPRForBPSaveRestoreCopy);
    }
  }

  if (TRI.needsStackRealignment(MF)) {
    HasFP = true;
    const unsigned Alignment = MFI.getMaxAlign().value();

    RoundedSize += Alignment;
    if (LiveRegs.empty()) {
      LiveRegs.init(TRI);
      LiveRegs.addLiveIns(MBB);
    }

    // s_add_u32 s33, s32, NumBytes
    // s_and_b32 s33, s33, 0b111...0000
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), FramePtrReg)
        .addReg(StackPtrReg)
        .addImm((Alignment - 1) * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_AND_B32), FramePtrReg)
        .addReg(FramePtrReg, RegState::Kill)
        .addImm(-Alignment * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
    FuncInfo->setIsStackRealigned(true);
  } else if ((HasFP = hasFP(MF))) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
        .addReg(StackPtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // If we need a base pointer, set it up here. It's whatever the value of
  // the stack pointer is at this point. Any variable size objects will be
  // allocated after this, so we can still use the base pointer to reference
  // the incoming arguments.
  if ((HasBP = TRI.hasBasePointer(MF))) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), BasePtrReg)
        .addReg(StackPtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  if (HasFP && RoundedSize != 0) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), StackPtrReg)
        .addReg(StackPtrReg)
        .addImm(RoundedSize * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
  }

  assert((!HasFP || (FuncInfo->SGPRForFPSaveRestoreCopy ||
                     FuncInfo->FramePointerSaveIndex)) &&
         "Needed to save FP but didn't save it anywhere");

  assert((HasFP || (!FuncInfo->SGPRForFPSaveRestoreCopy &&
                    !FuncInfo->FramePointerSaveIndex)) &&
         "Saved FP but didn't need it");

  assert((!HasBP || (FuncInfo->SGPRForBPSaveRestoreCopy ||
                     FuncInfo->BasePointerSaveIndex)) &&
         "Needed to save BP but didn't save it anywhere");

  assert((HasBP || (!FuncInfo->SGPRForBPSaveRestoreCopy &&
                    !FuncInfo->BasePointerSaveIndex)) &&
         "Saved BP but didn't need it");
}

void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  LivePhysRegs LiveRegs;
  DebugLoc DL;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = FuncInfo->isStackRealigned()
                             ? NumBytes + MFI.getMaxAlign().value()
                             : NumBytes;
  const Register StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  const Register FramePtrReg = FuncInfo->getFrameOffsetReg();
  const Register BasePtrReg =
      TRI.hasBasePointer(MF) ? TRI.getBaseRegister() : Register();

  bool HasFPSaveIndex = FuncInfo->FramePointerSaveIndex.hasValue();
  bool SpillFPToMemory = false;
  if (HasFPSaveIndex) {
    SpillFPToMemory = MFI.getStackID(*FuncInfo->FramePointerSaveIndex) !=
                      TargetStackID::SGPRSpill;
  }

  bool HasBPSaveIndex = FuncInfo->BasePointerSaveIndex.hasValue();
  bool SpillBPToMemory = false;
  if (HasBPSaveIndex) {
    SpillBPToMemory = MFI.getStackID(*FuncInfo->BasePointerSaveIndex) !=
                      TargetStackID::SGPRSpill;
  }

  if (RoundedSize != 0 && hasFP(MF)) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_SUB_U32), StackPtrReg)
        .addReg(StackPtrReg)
        .addImm(RoundedSize * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameDestroy);
  }

  if (FuncInfo->SGPRForFPSaveRestoreCopy) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
        .addReg(FuncInfo->SGPRForFPSaveRestoreCopy)
        .setMIFlag(MachineInstr::FrameDestroy);
  }

  if (FuncInfo->SGPRForBPSaveRestoreCopy) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), BasePtrReg)
        .addReg(FuncInfo->SGPRForBPSaveRestoreCopy)
        .setMIFlag(MachineInstr::FrameDestroy);
  }

  Register ScratchExecCopy;
  if (HasFPSaveIndex) {
    const int FI = FuncInfo->FramePointerSaveIndex.getValue();
    assert(!MFI.isDeadObjectIndex(FI));
    if (SpillFPToMemory) {
      if (!ScratchExecCopy)
        ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, false);

      MCPhysReg TempVGPR = findScratchNonCalleeSaveRegister(
          MRI, LiveRegs, AMDGPU::VGPR_32RegClass);
      if (!TempVGPR)
        report_fatal_error("failed to find free scratch register");
      buildEpilogReload(ST, LiveRegs, MBB, MBBI, TII, TempVGPR,
                        FuncInfo->getScratchRSrcReg(), StackPtrReg, FI);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), FramePtrReg)
          .addReg(TempVGPR, RegState::Kill);
    } else {
      // Reload from VGPR spill.
      assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill);
      ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill =
          FuncInfo->getSGPRToVGPRSpills(FI);
      assert(Spill.size() == 1);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READLANE_B32), FramePtrReg)
          .addReg(Spill[0].VGPR)
          .addImm(Spill[0].Lane);
    }
  }

  if (HasBPSaveIndex) {
    const int BasePtrFI = *FuncInfo->BasePointerSaveIndex;
    assert(!MFI.isDeadObjectIndex(BasePtrFI));
    if (SpillBPToMemory) {
      if (!ScratchExecCopy)
        ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, false);

      MCPhysReg TempVGPR = findScratchNonCalleeSaveRegister(
          MRI, LiveRegs, AMDGPU::VGPR_32RegClass);
      if (!TempVGPR)
        report_fatal_error("failed to find free scratch register");
      buildEpilogReload(ST, LiveRegs, MBB, MBBI, TII, TempVGPR,
                        FuncInfo->getScratchRSrcReg(), StackPtrReg, BasePtrFI);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), BasePtrReg)
          .addReg(TempVGPR, RegState::Kill);
    } else {
      // Reload from VGPR spill.
      assert(MFI.getStackID(BasePtrFI) == TargetStackID::SGPRSpill);
      ArrayRef<SIMachineFunctionInfo::SpilledReg> Spill =
          FuncInfo->getSGPRToVGPRSpills(BasePtrFI);
      assert(Spill.size() == 1);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::V_READLANE_B32), BasePtrReg)
          .addReg(Spill[0].VGPR)
          .addImm(Spill[0].Lane);
    }
  }

  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg :
       FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;

    if (!ScratchExecCopy)
      ScratchExecCopy = buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, false);

    buildEpilogReload(ST, LiveRegs, MBB, MBBI, TII, Reg.VGPR,
                      FuncInfo->getScratchRSrcReg(), StackPtrReg,
                      Reg.FI.getValue());
  }

  if (ScratchExecCopy) {
    // FIXME: Split block and make terminator.
    unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    MCRegister Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec)
        .addReg(ScratchExecCopy, RegState::Kill);
  }
}

#ifndef NDEBUG
static bool allSGPRSpillsAreDead(const MachineFunction &MF) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I) &&
        MFI.getStackID(I) == TargetStackID::SGPRSpill &&
        (I != FuncInfo->FramePointerSaveIndex &&
         I != FuncInfo->BasePointerSaveIndex)) {
      return false;
    }
  }

  return true;
}
#endif

StackOffset SIFrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                    int FI,
                                                    Register &FrameReg) const {
  const SIRegisterInfo *RI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();

  FrameReg = RI->getFrameRegister(MF);
  return StackOffset::getFixed(MF.getFrameInfo().getObjectOffset(FI));
}

void SIFrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF,
    RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();

  FuncInfo->removeDeadFrameIndices(MFI);
  assert(allSGPRSpillsAreDead(MF) &&
         "SGPR spill should have been removed in SILowerSGPRSpills");

  // FIXME: The other checks should be redundant with allStackObjectsAreDead,
  // but currently hasNonSpillStackObjects is set only from source
  // allocas. Stack temps produced from legalization are not counted currently.
  if (!allStackObjectsAreDead(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    if (FuncInfo->isEntryFunction()) {
      int ScavengeFI = MFI.CreateFixedObject(
          TRI->getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
      RS->addScavengingFrameIndex(ScavengeFI);
    } else {
      int ScavengeFI = MFI.CreateStackObject(
          TRI->getSpillSize(AMDGPU::SGPR_32RegClass),
          TRI->getSpillAlign(AMDGPU::SGPR_32RegClass), false);
      RS->addScavengingFrameIndex(ScavengeFI);
    }
  }
}

// Only report VGPRs to generic code.
void SIFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                           BitVector &SavedVGPRs,
                                           RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedVGPRs, RS);
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  if (MFI->isEntryFunction())
    return;

  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  // Ignore the SGPRs the default implementation found.
  SavedVGPRs.clearBitsNotInMask(TRI->getAllVGPRRegMask());

  // hasFP only knows about stack objects that already exist. We're now
  // determining the stack slots that will be created, so we have to predict
  // them. Stack objects force FP usage with calls.
  //
  // Note a new VGPR CSR may be introduced if one is used for the spill, but we
  // don't want to report it here.
  //
  // FIXME: Is this really hasReservedCallFrame?
  const bool WillHaveFP =
      FrameInfo.hasCalls() &&
      (SavedVGPRs.any() || !allStackObjectsAreDead(FrameInfo));

  // VGPRs used for SGPR spilling need to be specially inserted in the prolog,
  // so don't allow the default insertion to handle them.
  for (auto SSpill : MFI->getSGPRSpillVGPRs())
    SavedVGPRs.reset(SSpill.VGPR);

  LivePhysRegs LiveRegs;
  LiveRegs.init(*TRI);

  if (WillHaveFP || hasFP(MF)) {
    assert(!MFI->SGPRForFPSaveRestoreCopy && !MFI->FramePointerSaveIndex &&
           "Re-reserving spill slot for FP");
    getVGPRSpillLaneOrTempRegister(MF, LiveRegs, MFI->SGPRForFPSaveRestoreCopy,
                                   MFI->FramePointerSaveIndex, true);
  }

  if (TRI->hasBasePointer(MF)) {
    if (MFI->SGPRForFPSaveRestoreCopy)
      LiveRegs.addReg(MFI->SGPRForFPSaveRestoreCopy);

    assert(!MFI->SGPRForBPSaveRestoreCopy &&
           !MFI->BasePointerSaveIndex && "Re-reserving spill slot for BP");
    getVGPRSpillLaneOrTempRegister(MF, LiveRegs, MFI->SGPRForBPSaveRestoreCopy,
                                   MFI->BasePointerSaveIndex, false);
  }
}

void SIFrameLowering::determineCalleeSavesSGPR(MachineFunction &MF,
                                               BitVector &SavedRegs,
                                               RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  if (MFI->isEntryFunction())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  // The SP is specifically managed and we don't want extra spills of it.
  SavedRegs.reset(MFI->getStackPtrOffsetReg());

  const BitVector AllSavedRegs = SavedRegs;
  SavedRegs.clearBitsInMask(TRI->getAllVGPRRegMask());

  // If clearing VGPRs changed the mask, we will have some CSR VGPR spills.
  const bool HaveAnyCSRVGPR = SavedRegs != AllSavedRegs;

  // We have to anticipate introducing CSR VGPR spills if we don't have any
  // stack objects already, since we require an FP if there is a call and stack.
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  const bool WillHaveFP = FrameInfo.hasCalls() && HaveAnyCSRVGPR;

  // FP will be specially managed like SP.
  if (WillHaveFP || hasFP(MF))
    SavedRegs.reset(MFI->getFrameOffsetReg());
}

bool SIFrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return true; // Early exit if no callee saved registers are modified!

  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (!FuncInfo->SGPRForFPSaveRestoreCopy &&
      !FuncInfo->SGPRForBPSaveRestoreCopy)
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *RI = ST.getRegisterInfo();
  Register FramePtrReg = FuncInfo->getFrameOffsetReg();
  Register BasePtrReg = RI->getBaseRegister();
  unsigned NumModifiedRegs = 0;

  if (FuncInfo->SGPRForFPSaveRestoreCopy)
    NumModifiedRegs++;
  if (FuncInfo->SGPRForBPSaveRestoreCopy)
    NumModifiedRegs++;

  for (auto &CS : CSI) {
    if (CS.getReg() == FramePtrReg && FuncInfo->SGPRForFPSaveRestoreCopy) {
      CS.setDstReg(FuncInfo->SGPRForFPSaveRestoreCopy);
      if (--NumModifiedRegs)
        break;
    } else if (CS.getReg() == BasePtrReg &&
               FuncInfo->SGPRForBPSaveRestoreCopy) {
      CS.setDstReg(FuncInfo->SGPRForBPSaveRestoreCopy);
      if (--NumModifiedRegs)
        break;
    }
  }

  return false;
}

MachineBasicBlock::iterator SIFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF,
    MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  int64_t Amount = I->getOperand(0).getImm();
  if (Amount == 0)
    return MBB.erase(I);

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  if (!hasReservedCallFrame(MF)) {
    Amount = alignTo(Amount, getStackAlign());
    assert(isUInt<32>(Amount) && "exceeded stack address space size");
    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Register SPReg = MFI->getStackPtrOffsetReg();

    unsigned Op = IsDestroy ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
    BuildMI(MBB, I, DL, TII->get(Op), SPReg)
        .addReg(SPReg)
        .addImm(Amount * getScratchScaleFactor(ST));
  } else if (CalleePopAmount != 0) {
    llvm_unreachable("is this used?");
  }

  return MBB.erase(I);
}

/// Returns true if the frame will require a reference to the stack pointer.
///
/// This is the set of conditions common to setting up the stack pointer in a
/// kernel, and for using a frame pointer in a callable function.
///
/// FIXME: Should also check hasOpaqueSPAdjustment and if any inline asm
/// references SP.
static bool frameTriviallyRequiresSP(const MachineFrameInfo &MFI) {
  return MFI.hasVarSizedObjects() || MFI.hasStackMap() || MFI.hasPatchPoint();
}

// The FP for kernels is always known 0, so we never really need to set up an
// explicit register for it. However, DisableFramePointerElim will force us to
// use a register for it.
bool SIFrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // For entry functions we can use an immediate offset in most cases, so the
  // presence of calls doesn't imply we need a distinct frame pointer.
  if (MFI.hasCalls() &&
      !MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
    // All offsets are unsigned, so need to be addressed in the same direction
    // as stack growth.

    // FIXME: This function is pretty broken, since it can be called before the
    // frame layout is determined or CSR spills are inserted.
    return MFI.getStackSize() != 0;
  }

  return frameTriviallyRequiresSP(MFI) || MFI.isFrameAddressTaken() ||
         MF.getSubtarget<GCNSubtarget>().getRegisterInfo()->needsStackRealignment(MF) ||
         MF.getTarget().Options.DisableFramePointerElim(MF);
}

// This is essentially a reduced version of hasFP for entry functions. Since the
// stack pointer is known 0 on entry to kernels, we never really need an FP
// register. We may need to initialize the stack pointer depending on the frame
// properties, which logically overlaps many of the cases where an ordinary
// function would require an FP.
bool SIFrameLowering::requiresStackPointerReference(
    const MachineFunction &MF) const {
  // Callable functions always require a stack pointer reference.
  assert(MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction() &&
         "only expected to call this for entry points");

  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Entry points ordinarily don't need to initialize SP. We have to set it up
  // for callees if there are any. Also note tail calls are impossible/don't
  // make any sense for kernels.
  if (MFI.hasCalls())
    return true;

  // We still need to initialize the SP if we're doing anything weird that
  // references the SP, like variable sized stack objects.
  return frameTriviallyRequiresSP(MFI);
}