//===- SIMachineFunctionInfo.cpp - SI Machine Function Info ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SIMachineFunctionInfo.h"
#include "AMDGPUTargetMachine.h"
#include "AMDGPUSubtarget.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include <cassert>
#include <vector>

#define MAX_LANES 64

using namespace llvm;

SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
  : AMDGPUMachineFunction(MF),
    PrivateSegmentBuffer(false),
    DispatchPtr(false),
    QueuePtr(false),
    KernargSegmentPtr(false),
    DispatchID(false),
    FlatScratchInit(false),
    WorkGroupIDX(false),
    WorkGroupIDY(false),
    WorkGroupIDZ(false),
    WorkGroupInfo(false),
    PrivateSegmentWaveByteOffset(false),
    WorkItemIDX(false),
    WorkItemIDY(false),
    WorkItemIDZ(false),
    ImplicitBufferPtr(false),
    ImplicitArgPtr(false),
    GITPtrHigh(0xffffffff),
    HighBitsOf32BitAddress(0),
    GDSSize(0) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const Function &F = MF.getFunction();
  FlatWorkGroupSizes = ST.getFlatWorkGroupSizes(F);
  WavesPerEU = ST.getWavesPerEU(F);

  Occupancy = ST.computeOccupancy(F, getLDSSize());
  CallingConv::ID CC = F.getCallingConv();

  // FIXME: Should have analysis or something rather than attribute to detect
  // calls.
  const bool HasCalls = F.hasFnAttribute("amdgpu-calls");

  // Enable all kernel inputs if we have the fixed ABI. Don't bother if we
  // don't have any calls.
  const bool UseFixedABI = AMDGPUTargetMachine::EnableFixedFunctionABI &&
                           CC != CallingConv::AMDGPU_Gfx &&
                           (!isEntryFunction() || HasCalls);

  const bool IsKernel = CC == CallingConv::AMDGPU_KERNEL ||
                        CC == CallingConv::SPIR_KERNEL;

  if (IsKernel) {
    if (!F.arg_empty() || ST.getImplicitArgNumBytes(F) != 0)
      KernargSegmentPtr = true;
    WorkGroupIDX = true;
    WorkItemIDX = true;
  } else if (CC == CallingConv::AMDGPU_PS) {
    PSInputAddr = AMDGPU::getInitialPSInputAddr(F);
  }

  if (!isEntryFunction()) {
    if (UseFixedABI)
      ArgInfo = AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;

    // TODO: Pick a high register, and shift down, similar to a kernel.
    FrameOffsetReg = AMDGPU::SGPR33;
    StackPtrOffsetReg = AMDGPU::SGPR32;

    if (!ST.enableFlatScratch()) {
      // Non-entry functions have no special inputs for now, other than the
      // registers required for scratch access.
      ScratchRSrcReg = AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3;

      ArgInfo.PrivateSegmentBuffer =
        ArgDescriptor::createRegister(ScratchRSrcReg);
    }

    if (!F.hasFnAttribute("amdgpu-no-implicitarg-ptr"))
      ImplicitArgPtr = true;
  } else {
    ImplicitArgPtr = false;
    MaxKernArgAlign = std::max(ST.getAlignmentForImplicitArgPtr(),
                               MaxKernArgAlign);
  }

  bool isAmdHsaOrMesa = ST.isAmdHsaOrMesa(F);
  if (isAmdHsaOrMesa && !ST.enableFlatScratch())
    PrivateSegmentBuffer = true;
  else if (ST.isMesaGfxShader(F))
    ImplicitBufferPtr = true;

  if (UseFixedABI) {
    DispatchPtr = true;
    QueuePtr = true;
    ImplicitArgPtr = true;
    WorkGroupIDX = true;
    WorkGroupIDY = true;
    WorkGroupIDZ = true;
    WorkItemIDX = true;
    WorkItemIDY = true;
    WorkItemIDZ = true;

    // FIXME: We don't need this?
    DispatchID = true;
  } else if (!AMDGPU::isGraphics(CC)) {
    if (IsKernel || !F.hasFnAttribute("amdgpu-no-workgroup-id-x"))
      WorkGroupIDX = true;

    if (!F.hasFnAttribute("amdgpu-no-workgroup-id-y"))
      WorkGroupIDY = true;

    if (!F.hasFnAttribute("amdgpu-no-workgroup-id-z"))
      WorkGroupIDZ = true;

    if (IsKernel || !F.hasFnAttribute("amdgpu-no-workitem-id-x"))
      WorkItemIDX = true;

    if (!F.hasFnAttribute("amdgpu-no-workitem-id-y"))
      WorkItemIDY = true;

    if (!F.hasFnAttribute("amdgpu-no-workitem-id-z"))
      WorkItemIDZ = true;

    if (!F.hasFnAttribute("amdgpu-no-dispatch-ptr"))
      DispatchPtr = true;

    if (!F.hasFnAttribute("amdgpu-no-queue-ptr"))
      QueuePtr = true;

    if (!F.hasFnAttribute("amdgpu-no-dispatch-id"))
      DispatchID = true;
  }

  // FIXME: This attribute is a hack, we just need an analysis on the function
  // to look for allocas.
  bool HasStackObjects = F.hasFnAttribute("amdgpu-stack-objects");

  // TODO: This could be refined a lot. The attribute is a poor way of
  // detecting calls or stack objects that may require it before argument
  // lowering.
  if (ST.hasFlatAddressSpace() && isEntryFunction() &&
      (isAmdHsaOrMesa || ST.enableFlatScratch()) &&
      (HasCalls || HasStackObjects || ST.enableFlatScratch()) &&
      !ST.flatScratchIsArchitected()) {
    FlatScratchInit = true;
  }

  if (isEntryFunction()) {
    // X, XY, and XYZ are the only supported combinations, so make sure Y is
    // enabled if Z is.
    if (WorkItemIDZ)
      WorkItemIDY = true;

    if (!ST.flatScratchIsArchitected()) {
      PrivateSegmentWaveByteOffset = true;

      // HS and GS always have the scratch wave offset in SGPR5 on GFX9.
      if (ST.getGeneration() >= AMDGPUSubtarget::GFX9 &&
          (CC == CallingConv::AMDGPU_HS || CC == CallingConv::AMDGPU_GS))
        ArgInfo.PrivateSegmentWaveByteOffset =
            ArgDescriptor::createRegister(AMDGPU::SGPR5);
    }
  }

  Attribute A = F.getFnAttribute("amdgpu-git-ptr-high");
  StringRef S = A.getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, GITPtrHigh);

  A = F.getFnAttribute("amdgpu-32bit-address-high-bits");
  S = A.getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, HighBitsOf32BitAddress);

  S = F.getFnAttribute("amdgpu-gds-size").getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, GDSSize);
}
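
// Clamp the cached occupancy bound: first to the waves-per-EU limit requested
// for this function, then to the occupancy that is still reachable given its
// LDS usage.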
void SIMachineFunctionInfo::limitOccupancy(const MachineFunction &MF) {
  limitOccupancy(getMaxWavesPerEU());
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  limitOccupancy(ST.getOccupancyWithLocalMemSize(getLDSSize(),
                 MF.getFunction()));
}
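
// Each of the add* helpers below claims the next unused user SGPRs (handed
// out consecutively starting at SGPR0), records the resulting register in
// ArgInfo, and bumps NumUserSGPRs by the width of the claimed tuple. For
// example, calling addPrivateSegmentBuffer on a fresh function hands out
// s[0:3], so a subsequent addDispatchPtr would receive s[4:5].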
Register SIMachineFunctionInfo::addPrivateSegmentBuffer(
  const SIRegisterInfo &TRI) {
  ArgInfo.PrivateSegmentBuffer =
    ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SGPR_128RegClass));
  NumUserSGPRs += 4;
  return ArgInfo.PrivateSegmentBuffer.getRegister();
}

Register SIMachineFunctionInfo::addDispatchPtr(const SIRegisterInfo &TRI) {
  ArgInfo.DispatchPtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.DispatchPtr.getRegister();
}

Register SIMachineFunctionInfo::addQueuePtr(const SIRegisterInfo &TRI) {
  ArgInfo.QueuePtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.QueuePtr.getRegister();
}

Register SIMachineFunctionInfo::addKernargSegmentPtr(const SIRegisterInfo &TRI) {
  ArgInfo.KernargSegmentPtr
    = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.KernargSegmentPtr.getRegister();
}

Register SIMachineFunctionInfo::addDispatchID(const SIRegisterInfo &TRI) {
  ArgInfo.DispatchID = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.DispatchID.getRegister();
}

Register SIMachineFunctionInfo::addFlatScratchInit(const SIRegisterInfo &TRI) {
  ArgInfo.FlatScratchInit = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.FlatScratchInit.getRegister();
}

Register SIMachineFunctionInfo::addImplicitBufferPtr(const SIRegisterInfo &TRI) {
  ArgInfo.ImplicitBufferPtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.ImplicitBufferPtr.getRegister();
}

bool SIMachineFunctionInfo::isCalleeSavedReg(const MCPhysReg *CSRegs,
                                             MCPhysReg Reg) {
  for (unsigned I = 0; CSRegs[I]; ++I) {
    if (CSRegs[I] == Reg)
      return true;
  }

  return false;
}
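
// SGPR-to-VGPR spilling stashes each spilled 32-bit SGPR in one lane of a
// VGPR set aside for spilling, so a single VGPR covers up to wavefront-size
// SGPR spill slots.
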
/// Returns true if \p NumNeed free lanes remain in the VGPRs already used for
/// SGPR spilling.
//
// FIXME: This only works after processFunctionBeforeFrameFinalized
bool SIMachineFunctionInfo::haveFreeLanesForSGPRSpill(const MachineFunction &MF,
                                                      unsigned NumNeed) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  unsigned WaveSize = ST.getWavefrontSize();
  return NumVGPRSpillLanes + NumNeed <= WaveSize * SpillVGPRs.size();
}

/// Reserve a slice of a VGPR to support spilling for FrameIndex \p FI.
bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF,
                                                    int FI) {
  std::vector<SpilledReg> &SpillLanes = SGPRToVGPRSpills[FI];

  // This has already been allocated.
  if (!SpillLanes.empty())
    return true;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned WaveSize = ST.getWavefrontSize();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();

  unsigned Size = FrameInfo.getObjectSize(FI);
  unsigned NumLanes = Size / 4;

  if (NumLanes > WaveSize)
    return false;

  assert(Size >= 4 && "invalid sgpr spill size");
  assert(TRI->spillSGPRToVGPR() && "not spilling SGPRs to VGPRs");

  // Make sure to handle the case where a wide SGPR spill may span between two
  // VGPRs.
  for (unsigned I = 0; I < NumLanes; ++I, ++NumVGPRSpillLanes) {
    Register LaneVGPR;
    unsigned VGPRIndex = (NumVGPRSpillLanes % WaveSize);

    // Reserve a VGPR (when NumVGPRSpillLanes = 0, WaveSize, 2*WaveSize, ..)
    // when one of the two conditions is true:
    // 1. The reserved VGPR being tracked by VGPRReservedForSGPRSpill has not
    //    yet been claimed.
    // 2. All spill lanes of the reserved VGPR(s) are full and another spill
    //    lane is required.
    if (FuncInfo->VGPRReservedForSGPRSpill && NumVGPRSpillLanes < WaveSize) {
      assert(FuncInfo->VGPRReservedForSGPRSpill == SpillVGPRs.back().VGPR);
      LaneVGPR = FuncInfo->VGPRReservedForSGPRSpill;
    } else if (VGPRIndex == 0) {
      LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass, MF);
      if (LaneVGPR == AMDGPU::NoRegister) {
        // We have no VGPRs left for spilling SGPRs. Reset because we will not
        // partially spill the SGPR to VGPRs.
        SGPRToVGPRSpills.erase(FI);
        NumVGPRSpillLanes -= I;

#if 0
        DiagnosticInfoResourceLimit DiagOutOfRegs(MF.getFunction(),
                                                  "VGPRs for SGPR spilling",
                                                  0, DS_Error);
        MF.getFunction().getContext().diagnose(DiagOutOfRegs);
#endif
        return false;
      }

      Optional<int> SpillFI;
      // We need to preserve inactive lanes, so always save, even caller-save
      // registers.
      if (!isEntryFunction()) {
        SpillFI = FrameInfo.CreateSpillStackObject(4, Align(4));
      }

      SpillVGPRs.push_back(SGPRSpillVGPR(LaneVGPR, SpillFI));

      // Add this register as live-in to all blocks to avoid machine verifier
      // complaining about use of an undefined physical register.
      for (MachineBasicBlock &BB : MF)
        BB.addLiveIn(LaneVGPR);
    } else {
      LaneVGPR = SpillVGPRs.back().VGPR;
    }

    SpillLanes.push_back(SpilledReg(LaneVGPR, VGPRIndex));
  }

  return true;
}
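
// A VGPR set aside by reserveVGPRforSGPRSpills() is remembered in
// VGPRReservedForSGPRSpill and handed out first by allocateSGPRSpillToVGPR;
// if no SGPR spill ends up using it, it is released again through
// removeVGPRForSGPRSpill().
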
/// Reserve a VGPR for spilling of SGPRs
bool SIMachineFunctionInfo::reserveVGPRforSGPRSpills(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();

  Register LaneVGPR = TRI->findUnusedRegister(
      MF.getRegInfo(), &AMDGPU::VGPR_32RegClass, MF, true);
  if (LaneVGPR == Register())
    return false;
  SpillVGPRs.push_back(SGPRSpillVGPR(LaneVGPR, None));
  FuncInfo->VGPRReservedForSGPRSpill = LaneVGPR;
  return true;
}

/// Reserve AGPRs or VGPRs to support spilling for FrameIndex \p FI.
/// Either AGPR is spilled to VGPR or vice versa.
/// Returns true if \p FI can be eliminated completely.
bool SIMachineFunctionInfo::allocateVGPRSpillToAGPR(MachineFunction &MF,
                                                    int FI,
                                                    bool isAGPRtoVGPR) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  assert(ST.hasMAIInsts() && FrameInfo.isSpillSlotObjectIndex(FI));

  auto &Spill = VGPRToAGPRSpills[FI];

  // This has already been allocated.
  if (!Spill.Lanes.empty())
    return Spill.FullyAllocated;

  unsigned Size = FrameInfo.getObjectSize(FI);
  unsigned NumLanes = Size / 4;
  Spill.Lanes.resize(NumLanes, AMDGPU::NoRegister);

  const TargetRegisterClass &RC =
      isAGPRtoVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::AGPR_32RegClass;
  auto Regs = RC.getRegisters();

  auto &SpillRegs = isAGPRtoVGPR ? SpillAGPR : SpillVGPR;
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  Spill.FullyAllocated = true;

  // FIXME: Move allocation logic out of MachineFunctionInfo and initialize
  // once.
  BitVector OtherUsedRegs;
  OtherUsedRegs.resize(TRI->getNumRegs());

  const uint32_t *CSRMask =
      TRI->getCallPreservedMask(MF, MF.getFunction().getCallingConv());
  if (CSRMask)
    OtherUsedRegs.setBitsInMask(CSRMask);

  // TODO: Should include register tuples, but doesn't matter with current
  // usage.
  for (MCPhysReg Reg : SpillAGPR)
    OtherUsedRegs.set(Reg);
  for (MCPhysReg Reg : SpillVGPR)
    OtherUsedRegs.set(Reg);

  SmallVectorImpl<MCPhysReg>::const_iterator NextSpillReg = Regs.begin();
  for (int I = NumLanes - 1; I >= 0; --I) {
    NextSpillReg = std::find_if(
        NextSpillReg, Regs.end(), [&MRI, &OtherUsedRegs](MCPhysReg Reg) {
          return MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg) &&
                 !OtherUsedRegs[Reg];
        });

    if (NextSpillReg == Regs.end()) { // Registers exhausted
      Spill.FullyAllocated = false;
      break;
    }

    OtherUsedRegs.set(*NextSpillReg);
    SpillRegs.push_back(*NextSpillReg);
    Spill.Lanes[I] = *NextSpillReg++;
  }

  return Spill.FullyAllocated;
}
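
// Once SGPR and VGPR spills have been mapped onto registers above, the stack
// slots that backed them are dead and can be deleted; only the FP and BP save
// indices must survive, since their spill instructions are inserted later.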
void SIMachineFunctionInfo::removeDeadFrameIndices(MachineFrameInfo &MFI) {
  // The FP & BP spills haven't been inserted yet, so keep them around.
  for (auto &R : SGPRToVGPRSpills) {
    if (R.first != FramePointerSaveIndex && R.first != BasePointerSaveIndex)
      MFI.RemoveStackObject(R.first);
  }

  // All other SGPRs must be allocated on the default stack, so reset the
  // stack ID.
  for (int i = MFI.getObjectIndexBegin(), e = MFI.getObjectIndexEnd(); i != e;
       ++i)
    if (i != FramePointerSaveIndex && i != BasePointerSaveIndex)
      MFI.setStackID(i, TargetStackID::Default);

  for (auto &R : VGPRToAGPRSpills) {
    if (R.second.FullyAllocated)
      MFI.RemoveStackObject(R.first);
  }
}
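
// Lazily create the frame index used for the register scavenger's emergency
// spill slot. Entry functions pin it to a fixed object at offset zero; other
// functions get an ordinary stack object.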
int SIMachineFunctionInfo::getScavengeFI(MachineFrameInfo &MFI,
                                         const SIRegisterInfo &TRI) {
  if (ScavengeFI)
    return *ScavengeFI;
  if (isEntryFunction()) {
    ScavengeFI = MFI.CreateFixedObject(
        TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
  } else {
    ScavengeFI = MFI.CreateStackObject(
        TRI.getSpillSize(AMDGPU::SGPR_32RegClass),
        TRI.getSpillAlign(AMDGPU::SGPR_32RegClass), false);
  }
  return *ScavengeFI;
}

MCPhysReg SIMachineFunctionInfo::getNextUserSGPR() const {
  assert(NumSystemSGPRs == 0 && "System SGPRs must be added after user SGPRs");
  return AMDGPU::SGPR0 + NumUserSGPRs;
}

MCPhysReg SIMachineFunctionInfo::getNextSystemSGPR() const {
  return AMDGPU::SGPR0 + NumUserSGPRs + NumSystemSGPRs;
}

Register
SIMachineFunctionInfo::getGITPtrLoReg(const MachineFunction &MF) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (!ST.isAmdPalOS())
    return Register();
  Register GitPtrLo = AMDGPU::SGPR0; // Low GIT address passed in
  if (ST.hasMergedShaders()) {
    switch (MF.getFunction().getCallingConv()) {
    case CallingConv::AMDGPU_HS:
    case CallingConv::AMDGPU_GS:
      // Low GIT address is passed in s8 rather than s0 for an LS+HS or
      // ES+GS merged shader on gfx9+.
      GitPtrLo = AMDGPU::SGPR8;
      return GitPtrLo;
    default:
      return GitPtrLo;
    }
  }
  return GitPtrLo;
}

static yaml::StringValue regToString(Register Reg,
                                     const TargetRegisterInfo &TRI) {
  yaml::StringValue Dest;
  {
    raw_string_ostream OS(Dest.Value);
    OS << printReg(Reg, &TRI);
  }
  return Dest;
}
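
// Serialize every argument descriptor that is set in \p ArgInfo for MIR
// output, returning None when none are. As a rough sketch (field spellings
// assumed from the MIR YAML mapping), a function whose scratch rsrc lives in
// s[0:3] would print along the lines of:
//   argumentInfo:
//     privateSegmentBuffer: { reg: '$sgpr0_sgpr1_sgpr2_sgpr3' }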
static Optional<yaml::SIArgumentInfo>
convertArgumentInfo(const AMDGPUFunctionArgInfo &ArgInfo,
                    const TargetRegisterInfo &TRI) {
  yaml::SIArgumentInfo AI;

  auto convertArg = [&](Optional<yaml::SIArgument> &A,
                        const ArgDescriptor &Arg) {
    if (!Arg)
      return false;

    // Create a register or stack argument.
    yaml::SIArgument SA = yaml::SIArgument::createArgument(Arg.isRegister());
    if (Arg.isRegister()) {
      raw_string_ostream OS(SA.RegisterName.Value);
      OS << printReg(Arg.getRegister(), &TRI);
    } else
      SA.StackOffset = Arg.getStackOffset();
    // Check and update the optional mask.
    if (Arg.isMasked())
      SA.Mask = Arg.getMask();

    A = SA;
    return true;
  };

  bool Any = false;
  Any |= convertArg(AI.PrivateSegmentBuffer, ArgInfo.PrivateSegmentBuffer);
  Any |= convertArg(AI.DispatchPtr, ArgInfo.DispatchPtr);
  Any |= convertArg(AI.QueuePtr, ArgInfo.QueuePtr);
  Any |= convertArg(AI.KernargSegmentPtr, ArgInfo.KernargSegmentPtr);
  Any |= convertArg(AI.DispatchID, ArgInfo.DispatchID);
  Any |= convertArg(AI.FlatScratchInit, ArgInfo.FlatScratchInit);
  Any |= convertArg(AI.PrivateSegmentSize, ArgInfo.PrivateSegmentSize);
  Any |= convertArg(AI.WorkGroupIDX, ArgInfo.WorkGroupIDX);
  Any |= convertArg(AI.WorkGroupIDY, ArgInfo.WorkGroupIDY);
  Any |= convertArg(AI.WorkGroupIDZ, ArgInfo.WorkGroupIDZ);
  Any |= convertArg(AI.WorkGroupInfo, ArgInfo.WorkGroupInfo);
  Any |= convertArg(AI.PrivateSegmentWaveByteOffset,
                    ArgInfo.PrivateSegmentWaveByteOffset);
  Any |= convertArg(AI.ImplicitArgPtr, ArgInfo.ImplicitArgPtr);
  Any |= convertArg(AI.ImplicitBufferPtr, ArgInfo.ImplicitBufferPtr);
  Any |= convertArg(AI.WorkItemIDX, ArgInfo.WorkItemIDX);
  Any |= convertArg(AI.WorkItemIDY, ArgInfo.WorkItemIDY);
  Any |= convertArg(AI.WorkItemIDZ, ArgInfo.WorkItemIDZ);

  if (Any)
    return AI;

  return None;
}

yaml::SIMachineFunctionInfo::SIMachineFunctionInfo(
    const llvm::SIMachineFunctionInfo &MFI, const TargetRegisterInfo &TRI,
    const llvm::MachineFunction &MF)
    : ExplicitKernArgSize(MFI.getExplicitKernArgSize()),
      MaxKernArgAlign(MFI.getMaxKernArgAlign()), LDSSize(MFI.getLDSSize()),
      DynLDSAlign(MFI.getDynLDSAlign()), IsEntryFunction(MFI.isEntryFunction()),
      NoSignedZerosFPMath(MFI.hasNoSignedZerosFPMath()),
      MemoryBound(MFI.isMemoryBound()), WaveLimiter(MFI.needsWaveLimiter()),
      HasSpilledSGPRs(MFI.hasSpilledSGPRs()),
      HasSpilledVGPRs(MFI.hasSpilledVGPRs()),
      HighBitsOf32BitAddress(MFI.get32BitAddressHighBits()),
      Occupancy(MFI.getOccupancy()),
      ScratchRSrcReg(regToString(MFI.getScratchRSrcReg(), TRI)),
      FrameOffsetReg(regToString(MFI.getFrameOffsetReg(), TRI)),
      StackPtrOffsetReg(regToString(MFI.getStackPtrOffsetReg(), TRI)),
      ArgInfo(convertArgumentInfo(MFI.getArgInfo(), TRI)), Mode(MFI.getMode()) {
  auto SFI = MFI.getOptionalScavengeFI();
  if (SFI)
    ScavengeFI = yaml::FrameIndex(*SFI, MF.getFrameInfo());
}

void yaml::SIMachineFunctionInfo::mappingImpl(yaml::IO &YamlIO) {
  MappingTraits<SIMachineFunctionInfo>::mapping(YamlIO, *this);
}

bool SIMachineFunctionInfo::initializeBaseYamlFields(
    const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF,
    PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) {
  ExplicitKernArgSize = YamlMFI.ExplicitKernArgSize;
  MaxKernArgAlign = assumeAligned(YamlMFI.MaxKernArgAlign);
  LDSSize = YamlMFI.LDSSize;
  DynLDSAlign = YamlMFI.DynLDSAlign;
  HighBitsOf32BitAddress = YamlMFI.HighBitsOf32BitAddress;
  Occupancy = YamlMFI.Occupancy;
  IsEntryFunction = YamlMFI.IsEntryFunction;
  NoSignedZerosFPMath = YamlMFI.NoSignedZerosFPMath;
  MemoryBound = YamlMFI.MemoryBound;
  WaveLimiter = YamlMFI.WaveLimiter;
  HasSpilledSGPRs = YamlMFI.HasSpilledSGPRs;
  HasSpilledVGPRs = YamlMFI.HasSpilledVGPRs;

  if (YamlMFI.ScavengeFI) {
    auto FIOrErr = YamlMFI.ScavengeFI->getFI(MF.getFrameInfo());
    if (!FIOrErr) {
      // Create a diagnostic for the frame index.
      const MemoryBuffer &Buffer =
          *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());

      Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1, 1,
                           SourceMgr::DK_Error, toString(FIOrErr.takeError()),
                           "", None, None);
      SourceRange = YamlMFI.ScavengeFI->SourceRange;
      return true;
    }
    ScavengeFI = *FIOrErr;
  } else {
    ScavengeFI = None;
  }
  return false;
}

// Remove the VGPR that was reserved for SGPR spills if there are no spilled
// SGPRs.
bool SIMachineFunctionInfo::removeVGPRForSGPRSpill(Register ReservedVGPR,
                                                   MachineFunction &MF) {
  for (auto *i = SpillVGPRs.begin(); i < SpillVGPRs.end(); i++) {
    if (i->VGPR == ReservedVGPR) {
      SpillVGPRs.erase(i);

      for (MachineBasicBlock &MBB : MF) {
        MBB.removeLiveIn(ReservedVGPR);
        MBB.sortUniqueLiveIns();
      }
      this->VGPRReservedForSGPRSpill = AMDGPU::NoRegister;
      return true;
    }
  }
  return false;
}