//===----------------------- SIFrameLowering.cpp --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "AMDGPUSubtarget.h"

#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"

using namespace llvm;


static ArrayRef<MCPhysReg> getAllSGPR128(const SISubtarget &ST,
                                         const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
                      ST.getMaxNumSGPRs(MF) / 4);
}

static ArrayRef<MCPhysReg> getAllSGPRs(const SISubtarget &ST,
                                       const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(),
                      ST.getMaxNumSGPRs(MF));
}

void SIFrameLowering::emitFlatScratchInit(const SISubtarget &ST,
                                          MachineFunction &MF,
                                          MachineBasicBlock &MBB) const {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();

  // We don't need this if we only have spills since there is no user facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  unsigned FlatScratchInitReg
    = TRI->getPreloadedValue(MF, SIRegisterInfo::FLAT_SCRATCH_INIT);

  MachineRegisterInfo &MRI = MF.getRegInfo();
  MRI.addLiveIn(FlatScratchInitReg);
  MBB.addLiveIn(FlatScratchInitReg);

  unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
  unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
      .addReg(FlatScrInitHi)
      .addImm(0);

    return;
  }

  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
    .addReg(FlatScrInitHi, RegState::Kill);

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
    .addReg(FlatScrInitLo)
    .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
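  // The result lands in FLAT_SCR_HI: when flat_scratch is not a 64-bit
  // pointer, the hardware consumes this offset in 256-byte units (hence the
  // shift by log2(256) = 8), while FLAT_SCR_LO holds the size in bytes copied
  // above.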
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
    .addReg(FlatScrInitLo, RegState::Kill)
    .addImm(8);
}

unsigned SIFrameLowering::getReservedPrivateSegmentBufferReg(
  const SISubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // We need to insert initialization of the scratch resource descriptor.
  unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
  if (ScratchRsrcReg == AMDGPU::NoRegister ||
      !MRI.isPhysRegUsed(ScratchRsrcReg))
    return AMDGPU::NoRegister;

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift it down to the end of those
  // which were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. We only
  // cannot do this for the resources required for scratch access. For now we
  // skip over user SGPRs and may leave unused holes.

  // We find the resource first because it has an alignment requirement.

  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = getAllSGPR128(ST, MF);
  AllSGPR128s = AllSGPR128s.slice(
    std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

// Shift down registers reserved for the scratch wave offset and stack pointer
// SGPRs.
std::pair<unsigned, unsigned>
SIFrameLowering::getReservedPrivateSegmentWaveByteOffsetReg(
  const SISubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // No replacement necessary.
  if (ScratchWaveOffsetReg == AMDGPU::NoRegister ||
      !MRI.isPhysRegUsed(ScratchWaveOffsetReg)) {
    assert(MFI->getStackPtrOffsetReg() == AMDGPU::NoRegister);
    return std::make_pair(AMDGPU::NoRegister, AMDGPU::NoRegister);
  }

  unsigned SPReg = MFI->getStackPtrOffsetReg();
  if (ST.hasSGPRInitBug())
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();

  ArrayRef<MCPhysReg> AllSGPRs = getAllSGPRs(ST, MF);
  if (NumPreloaded > AllSGPRs.size())
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  AllSGPRs = AllSGPRs.slice(NumPreloaded);

  // We need to drop registers from the end of the list that we cannot use
  // for the scratch wave offset.
  // + 2 because s102 and s103 do not exist on VI.
  // + 2 for vcc
  // + 2 for xnack_mask
  // + 2 for flat_scratch
  // + 4 for registers reserved for scratch resource register
  // + 1 for register reserved for scratch wave offset.
  //       (By excluding this register from the candidate list, when it is
  //       being used for the scratch wave offset and there are no other free
  //       SGPRs, the value simply stays in this register.)
  // + 1 if stack pointer is used.
  // ----
  // 13 (+1)
  unsigned ReservedRegCount = 13;
  if (SPReg != AMDGPU::NoRegister)
    ++ReservedRegCount;

  if (AllSGPRs.size() < ReservedRegCount)
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  bool HandledScratchWaveOffsetReg =
    ScratchWaveOffsetReg != TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);

  for (MCPhysReg Reg : AllSGPRs.drop_back(ReservedRegCount)) {
    // Pick the first unallocated SGPR. Be careful not to pick an alias of the
    // scratch descriptor, since we haven't added its uses yet.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      if (!HandledScratchWaveOffsetReg) {
        HandledScratchWaveOffsetReg = true;

        MRI.replaceRegWith(ScratchWaveOffsetReg, Reg);
        MFI->setScratchWaveOffsetReg(Reg);
        ScratchWaveOffsetReg = Reg;
      } else {
        if (SPReg == AMDGPU::NoRegister)
          break;

        MRI.replaceRegWith(SPReg, Reg);
        MFI->setStackPtrOffsetReg(Reg);
        SPReg = Reg;
        break;
      }
    }
  }

  return std::make_pair(ScratchWaveOffsetReg, SPReg);
}

void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  // Emit debugger prologue if "amdgpu-debugger-emit-prologue" attribute was
  // specified.
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  auto AMDGPUASI = ST.getAMDGPUAS();
  if (ST.debuggerEmitPrologue())
    emitDebuggerPrologue(MF, MBB);

  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // If we only have SGPR spills, we won't actually be using scratch memory
  // since these spill to VGPRs.
  //
  // FIXME: We should be cleaning up these unused SGPR spill frame indices
  // somewhere.

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // We need to replace the private segment buffer and wave offset registers
  // even if there are no stack objects. There could be stores to undef or a
  // constant without an associated object.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.
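
  // Note: flat scratch initialization is only needed when scratch memory is
  // really addressed; SGPR-only spills are lowered to VGPR lanes and never
  // touch scratch.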
  if (MF.getFrameInfo().hasStackObjects() && MFI->hasFlatScratchInit())
    emitFlatScratchInit(ST, MF, MBB);

  unsigned SPReg = MFI->getStackPtrOffsetReg();
  if (SPReg != AMDGPU::NoRegister) {
    DebugLoc DL;
    int64_t StackSize = MF.getFrameInfo().getStackSize();

    if (StackSize == 0) {
      BuildMI(MBB, MBB.begin(), DL, TII->get(AMDGPU::COPY), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg());
    } else {
      BuildMI(MBB, MBB.begin(), DL, TII->get(AMDGPU::S_ADD_U32), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg())
        .addImm(StackSize * ST.getWavefrontSize());
    }
  }

  unsigned ScratchRsrcReg
    = getReservedPrivateSegmentBufferReg(ST, TII, TRI, MFI, MF);

  unsigned ScratchWaveOffsetReg;
  std::tie(ScratchWaveOffsetReg, SPReg)
    = getReservedPrivateSegmentWaveByteOffsetReg(ST, TII, TRI, MFI, MF);

  // It's possible to have uses of only ScratchWaveOffsetReg without
  // ScratchRsrcReg if it's only used for the initialization of flat_scratch,
  // but the inverse is not true.
  if (ScratchWaveOffsetReg == AMDGPU::NoRegister) {
    assert(ScratchRsrcReg == AMDGPU::NoRegister);
    return;
  }

  // We need to insert initialization of the scratch resource descriptor.
  unsigned PreloadedScratchWaveOffsetReg = TRI->getPreloadedValue(
    MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);

  unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister;
  if (ST.isAmdCodeObjectV2(MF) || ST.isMesaGfxShader(MF)) {
    PreloadedPrivateBufferReg = TRI->getPreloadedValue(
      MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
  }

  bool OffsetRegUsed = MRI.isPhysRegUsed(ScratchWaveOffsetReg);
  bool ResourceRegUsed = ScratchRsrcReg != AMDGPU::NoRegister &&
                         MRI.isPhysRegUsed(ScratchRsrcReg);

  // We added live-ins during argument lowering, but since they were not used
  // they were deleted. We're adding the uses now, so add them back.
  if (OffsetRegUsed) {
    assert(PreloadedScratchWaveOffsetReg != AMDGPU::NoRegister &&
           "scratch wave offset input is required");
    MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
    MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
  }

  if (ResourceRegUsed && PreloadedPrivateBufferReg != AMDGPU::NoRegister) {
    assert(ST.isAmdCodeObjectV2(MF) || ST.isMesaGfxShader(MF));
    MRI.addLiveIn(PreloadedPrivateBufferReg);
    MBB.addLiveIn(PreloadedPrivateBufferReg);
  }

  // Make the selected registers live throughout the function.
  for (MachineBasicBlock &OtherBB : MF) {
    if (&OtherBB == &MBB)
      continue;

    if (OffsetRegUsed)
      OtherBB.addLiveIn(ScratchWaveOffsetReg);

    if (ResourceRegUsed)
      OtherBB.addLiveIn(ScratchRsrcReg);
  }

  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // If we reserved the original input registers, we don't need to copy to the
  // reserved registers.

  bool CopyBuffer = ResourceRegUsed &&
    PreloadedPrivateBufferReg != AMDGPU::NoRegister &&
    ST.isAmdCodeObjectV2(MF) &&
    ScratchRsrcReg != PreloadedPrivateBufferReg;

  // This needs to be careful of the copying order to avoid overwriting one of
  // the input registers before it's been copied to its final
  // destination. Usually the offset should be copied first.
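  // The exception is when the chosen wave offset register overlaps the
  // incoming private buffer register: the offset copy would then clobber part
  // of the buffer input before it is read, so copy the buffer first.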
  bool CopyBufferFirst = TRI->isSubRegisterEq(PreloadedPrivateBufferReg,
                                              ScratchWaveOffsetReg);
  if (CopyBuffer && CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (OffsetRegUsed &&
      PreloadedScratchWaveOffsetReg != ScratchWaveOffsetReg) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
      .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill);
  }

  if (CopyBuffer && !CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (ResourceRegUsed &&
      (ST.isMesaGfxShader(MF) ||
       (PreloadedPrivateBufferReg == AMDGPU::NoRegister))) {
    assert(!ST.isAmdCodeObjectV2(MF));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and set up the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();

    if (MFI->hasPrivateMemoryInputPtr()) {
      unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
          .addReg(PreloadedPrivateBufferReg)
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        PointerType *PtrTy =
          PointerType::get(Type::getInt64Ty(MF.getFunction()->getContext()),
                           AMDGPUASI.CONSTANT_ADDRESS);
        MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
        auto MMO = MF.getMachineMemOperand(PtrInfo,
                                           MachineMemOperand::MOLoad |
                                           MachineMemOperand::MOInvariant |
                                           MachineMemOperand::MODereferenceable,
                                           0, 0);
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
          .addReg(PreloadedPrivateBufferReg)
          .addImm(0) // offset
          .addImm(0) // glc
          .addMemOperand(MMO)
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      }
    } else {
      unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
        .addExternalSymbol("SCRATCH_RSRC_DWORD0")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
        .addExternalSymbol("SCRATCH_RSRC_DWORD1")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
      .addImm(Rsrc23 & 0xffffffff)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
      .addImm(Rsrc23 >> 32)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  }
}

void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {

}

static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I))
      return false;
  }

  return true;
}

int SIFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            unsigned &FrameReg) const {
  const SIRegisterInfo *RI = MF.getSubtarget<SISubtarget>().getRegisterInfo();

  FrameReg = RI->getFrameRegister(MF);
  return MF.getFrameInfo().getObjectOffset(FI);
}
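
// Lower SGPR spills to VGPR lanes before frame offsets are finalized, and
// reserve an emergency scavenging slot when scratch memory is really needed.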
void SIFrameLowering::processFunctionBeforeFrameFinalized(
  MachineFunction &MF,
  RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasStackObjects())
    return;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  bool AllSGPRSpilledToVGPRs = false;

  if (TRI.spillSGPRToVGPR() && FuncInfo->hasSpilledSGPRs()) {
    AllSGPRSpilledToVGPRs = true;

    // Process all SGPR spills before frame offsets are finalized. Ideally
    // SGPRs are spilled to VGPRs, in which case we can eliminate the stack
    // usage.
    //
    // XXX - This operates under the assumption that only other SGPR spills are
    // users of the frame index. I'm not 100% sure this is correct. The
    // StackColoring pass has a comment saying a future improvement would be
    // merging allocas with spill slots, but for now, according to
    // MachineFrameInfo, an isSpillSlot object can't alias any other object.
    for (MachineBasicBlock &MBB : MF) {
      MachineBasicBlock::iterator Next;
      for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
        MachineInstr &MI = *I;
        Next = std::next(I);

        if (TII->isSGPRSpill(MI)) {
          int FI = TII->getNamedOperand(MI, AMDGPU::OpName::addr)->getIndex();
          if (FuncInfo->allocateSGPRSpillToVGPR(MF, FI)) {
            bool Spilled = TRI.eliminateSGPRToVGPRSpillFrameIndex(MI, FI, RS);
            (void)Spilled;
            assert(Spilled && "failed to spill SGPR to VGPR when allocated");
          } else
            AllSGPRSpilledToVGPRs = false;
        }
      }
    }

    FuncInfo->removeSGPRToVGPRFrameIndices(MFI);
  }

  // FIXME: The other checks should be redundant with allStackObjectsAreDead,
  // but currently hasNonSpillStackObjects is set only from source
  // allocas. Stack temps produced from legalization are not counted currently.
  if (FuncInfo->hasNonSpillStackObjects() || FuncInfo->hasSpilledVGPRs() ||
      !AllSGPRSpilledToVGPRs || !allStackObjectsAreDead(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    // We force this to be at offset 0 so no user object ever has 0 as an
    // address, so we may use 0 as an invalid pointer value. This is because
    // LLVM assumes 0 is an invalid pointer in address space 0. Because alloca
    // is required to be address space 0, we are forced to accept this for
    // now. Ideally we could have the stack in another address space with 0 as
    // a valid pointer, and -1 as the null value.
    //
    // This will also waste additional space when user stack objects require
    // more than 4-byte alignment.
    //
    // The main cost here is losing the offset for addressing modes. However
    // this also ensures we shouldn't need a register for the offset when
    // emergency scavenging.
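    // Reserve a single emergency slot at a fixed offset of 0, sized to hold
    // one 32-bit SGPR spill.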
    int ScavengeFI = MFI.CreateFixedObject(
      TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
    RS->addScavengingFrameIndex(ScavengeFI);
  }
}

void SIFrameLowering::emitDebuggerPrologue(MachineFunction &MF,
                                           MachineBasicBlock &MBB) const {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  MachineBasicBlock::iterator I = MBB.begin();
  DebugLoc DL;

  // For each dimension:
  for (unsigned i = 0; i < 3; ++i) {
    // Get work group ID SGPR, and make it live-in again.
    unsigned WorkGroupIDSGPR = MFI->getWorkGroupIDSGPR(i);
    MF.getRegInfo().addLiveIn(WorkGroupIDSGPR);
    MBB.addLiveIn(WorkGroupIDSGPR);

    // Since SGPRs are spilled into VGPRs, copy work group ID SGPR to VGPR in
    // order to spill it to scratch.
    unsigned WorkGroupIDVGPR =
      MF.getRegInfo().createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), WorkGroupIDVGPR)
      .addReg(WorkGroupIDSGPR);

    // Spill work group ID.
    int WorkGroupIDObjectIdx = MFI->getDebuggerWorkGroupIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkGroupIDVGPR, false,
                             WorkGroupIDObjectIdx, &AMDGPU::VGPR_32RegClass,
                             TRI);

    // Get work item ID VGPR, and make it live-in again.
    unsigned WorkItemIDVGPR = MFI->getWorkItemIDVGPR(i);
    MF.getRegInfo().addLiveIn(WorkItemIDVGPR);
    MBB.addLiveIn(WorkItemIDVGPR);

    // Spill work item ID.
    int WorkItemIDObjectIdx = MFI->getDebuggerWorkItemIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkItemIDVGPR, false,
                             WorkItemIDObjectIdx, &AMDGPU::VGPR_32RegClass,
                             TRI);
  }
}