//===-- AMDGPUAsmPrinter.cpp - AMDGPU Assembly printer -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// The AMDGPUAsmPrinter is used to print both an assembly string and binary
/// code. When passed an MCAsmStreamer it prints assembly, and when passed
/// an MCObjectStreamer it outputs binary code.
//
//===----------------------------------------------------------------------===//
//

#include "AMDGPUAsmPrinter.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "InstPrinter/AMDGPUInstPrinter.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFile.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/AMDGPUMetadata.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;
using namespace llvm::AMDGPU;

// TODO: This should get the default rounding mode from the kernel. We just set
// the default here, but this could change if the OpenCL rounding mode pragmas
// are used.
//
// The denormal mode here should match what is reported by the OpenCL runtime
// for the CL_FP_DENORM bit from CL_DEVICE_{HALF|SINGLE|DOUBLE}_FP_CONFIG, but
// can also be overridden to flush with the -cl-denorms-are-zero compiler flag.
//
// AMD OpenCL only sets flush none and reports CL_FP_DENORM for double
// precision, and leaves single precision to flush all and does not report
// CL_FP_DENORM for CL_DEVICE_SINGLE_FP_CONFIG. Mesa's OpenCL currently reports
// CL_FP_DENORM for both.
//
// FIXME: It seems some instructions do not support single precision denormals
// regardless of the mode (exp_*_f32, rcp_*_f32, rsq_*_f32, rsq_*f32, sqrt_f32,
// and sin_f32, cos_f32 on most parts).

// We want to use these instructions, and using fp32 denormals also causes
// instructions to run at the double precision rate for the device, so it's
// probably best to just report no single precision denormals.
static uint32_t getFPMode(const MachineFunction &F) {
  const SISubtarget& ST = F.getSubtarget<SISubtarget>();
  // TODO: Is there any real use for the flush in only / flush out only modes?

  uint32_t FP32Denormals =
    ST.hasFP32Denormals() ? FP_DENORM_FLUSH_NONE : FP_DENORM_FLUSH_IN_FLUSH_OUT;

  uint32_t FP64Denormals =
    ST.hasFP64Denormals() ?
FP_DENORM_FLUSH_NONE : FP_DENORM_FLUSH_IN_FLUSH_OUT; 76 77 return FP_ROUND_MODE_SP(FP_ROUND_ROUND_TO_NEAREST) | 78 FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_NEAREST) | 79 FP_DENORM_MODE_SP(FP32Denormals) | 80 FP_DENORM_MODE_DP(FP64Denormals); 81 } 82 83 static AsmPrinter * 84 createAMDGPUAsmPrinterPass(TargetMachine &tm, 85 std::unique_ptr<MCStreamer> &&Streamer) { 86 return new AMDGPUAsmPrinter(tm, std::move(Streamer)); 87 } 88 89 extern "C" void LLVMInitializeAMDGPUAsmPrinter() { 90 TargetRegistry::RegisterAsmPrinter(getTheAMDGPUTarget(), 91 createAMDGPUAsmPrinterPass); 92 TargetRegistry::RegisterAsmPrinter(getTheGCNTarget(), 93 createAMDGPUAsmPrinterPass); 94 } 95 96 AMDGPUAsmPrinter::AMDGPUAsmPrinter(TargetMachine &TM, 97 std::unique_ptr<MCStreamer> Streamer) 98 : AsmPrinter(TM, std::move(Streamer)) { 99 AMDGPUASI = static_cast<AMDGPUTargetMachine*>(&TM)->getAMDGPUAS(); 100 } 101 102 StringRef AMDGPUAsmPrinter::getPassName() const { 103 return "AMDGPU Assembly Printer"; 104 } 105 106 const MCSubtargetInfo* AMDGPUAsmPrinter::getSTI() const { 107 return TM.getMCSubtargetInfo(); 108 } 109 110 AMDGPUTargetStreamer* AMDGPUAsmPrinter::getTargetStreamer() const { 111 if (!OutStreamer) 112 return nullptr; 113 return static_cast<AMDGPUTargetStreamer*>(OutStreamer->getTargetStreamer()); 114 } 115 116 void AMDGPUAsmPrinter::EmitStartOfAsmFile(Module &M) { 117 if (TM.getTargetTriple().getArch() != Triple::amdgcn) 118 return; 119 120 if (TM.getTargetTriple().getOS() != Triple::AMDHSA && 121 TM.getTargetTriple().getOS() != Triple::AMDPAL) 122 return; 123 124 if (TM.getTargetTriple().getOS() == Triple::AMDHSA) 125 HSAMetadataStream.begin(M); 126 127 if (TM.getTargetTriple().getOS() == Triple::AMDPAL) 128 readPALMetadata(M); 129 130 // Deprecated notes are not emitted for code object v3. 131 if (IsaInfo::hasCodeObjectV3(getSTI()->getFeatureBits())) 132 return; 133 134 // HSA emits NT_AMDGPU_HSA_CODE_OBJECT_VERSION for code objects v2. 135 if (TM.getTargetTriple().getOS() == Triple::AMDHSA) 136 getTargetStreamer()->EmitDirectiveHSACodeObjectVersion(2, 1); 137 138 // HSA and PAL emit NT_AMDGPU_HSA_ISA for code objects v2. 139 IsaInfo::IsaVersion ISA = IsaInfo::getIsaVersion(getSTI()->getFeatureBits()); 140 getTargetStreamer()->EmitDirectiveHSACodeObjectISA( 141 ISA.Major, ISA.Minor, ISA.Stepping, "AMD", "AMDGPU"); 142 } 143 144 void AMDGPUAsmPrinter::EmitEndOfAsmFile(Module &M) { 145 if (TM.getTargetTriple().getArch() != Triple::amdgcn) 146 return; 147 148 // Following code requires TargetStreamer to be present. 149 if (!getTargetStreamer()) 150 return; 151 152 // Emit ISA Version (NT_AMD_AMDGPU_ISA). 153 std::string ISAVersionString; 154 raw_string_ostream ISAVersionStream(ISAVersionString); 155 IsaInfo::streamIsaVersion(getSTI(), ISAVersionStream); 156 getTargetStreamer()->EmitISAVersion(ISAVersionStream.str()); 157 158 // Emit HSA Metadata (NT_AMD_AMDGPU_HSA_METADATA). 159 if (TM.getTargetTriple().getOS() == Triple::AMDHSA) { 160 HSAMetadataStream.end(); 161 getTargetStreamer()->EmitHSAMetadata(HSAMetadataStream.getHSAMetadata()); 162 } 163 164 // Emit PAL Metadata (NT_AMD_AMDGPU_PAL_METADATA). 165 if (TM.getTargetTriple().getOS() == Triple::AMDPAL) { 166 // Copy the PAL metadata from the map where we collected it into a vector, 167 // then write it as a .note. 
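    // Each map entry becomes two consecutive words in the vector: the key,
    // then its value, mirroring the key=value pairing used when the metadata
    // was read in readPALMetadata.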
    PALMD::Metadata PALMetadataVector;
    for (auto i : PALMetadataMap) {
      PALMetadataVector.push_back(i.first);
      PALMetadataVector.push_back(i.second);
    }
    getTargetStreamer()->EmitPALMetadata(PALMetadataVector);
  }
}

bool AMDGPUAsmPrinter::isBlockOnlyReachableByFallthrough(
    const MachineBasicBlock *MBB) const {
  if (!AsmPrinter::isBlockOnlyReachableByFallthrough(MBB))
    return false;

  if (MBB->empty())
    return true;

  // If this is a block implementing a long branch, an expression relative to
  // the start of the block is needed.
  // XXX - Is there a smarter way to check this?
  return (MBB->back().getOpcode() != AMDGPU::S_SETPC_B64);
}

void AMDGPUAsmPrinter::EmitFunctionBodyStart() {
  const AMDGPUMachineFunction *MFI = MF->getInfo<AMDGPUMachineFunction>();
  if (!MFI->isEntryFunction())
    return;

  const AMDGPUSubtarget &STM = MF->getSubtarget<AMDGPUSubtarget>();
  amd_kernel_code_t KernelCode;
  if (STM.isAmdCodeObjectV2(*MF)) {
    getAmdKernelCode(KernelCode, CurrentProgramInfo, *MF);

    OutStreamer->SwitchSection(getObjFileLowering().getTextSection());
    getTargetStreamer()->EmitAMDKernelCodeT(KernelCode);
  }

  if (TM.getTargetTriple().getOS() != Triple::AMDHSA)
    return;

  HSAMetadataStream.emitKernel(MF->getFunction(),
                               getHSACodeProps(*MF, CurrentProgramInfo),
                               getHSADebugProps(*MF, CurrentProgramInfo));
}

void AMDGPUAsmPrinter::EmitFunctionEntryLabel() {
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const AMDGPUSubtarget &STM = MF->getSubtarget<AMDGPUSubtarget>();
  if (MFI->isEntryFunction() && STM.isAmdCodeObjectV2(*MF)) {
    SmallString<128> SymbolName;
    getNameWithPrefix(SymbolName, &MF->getFunction()),
    getTargetStreamer()->EmitAMDGPUSymbolType(
        SymbolName, ELF::STT_AMDGPU_HSA_KERNEL);
  }
  const AMDGPUSubtarget &STI = MF->getSubtarget<AMDGPUSubtarget>();
  if (STI.dumpCode()) {
    // Disassemble function name label to text.
    DisasmLines.push_back(MF->getName().str() + ":");
    DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLines.back().size());
    HexLines.push_back("");
  }

  AsmPrinter::EmitFunctionEntryLabel();
}

void AMDGPUAsmPrinter::EmitBasicBlockStart(const MachineBasicBlock &MBB) const {
  const AMDGPUSubtarget &STI = MBB.getParent()->getSubtarget<AMDGPUSubtarget>();
  if (STI.dumpCode() && !isBlockOnlyReachableByFallthrough(&MBB)) {
    // Write a line for the basic block label if it is not only fallthrough.
    DisasmLines.push_back(
        (Twine("BB") + Twine(getFunctionNumber())
         + "_" + Twine(MBB.getNumber()) + ":").str());
    DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLines.back().size());
    HexLines.push_back("");
  }
  AsmPrinter::EmitBasicBlockStart(MBB);
}

void AMDGPUAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {

  // Group segment variables aren't emitted in HSA.
  if (AMDGPU::isGroupSegment(GV))
    return;

  AsmPrinter::EmitGlobalVariable(GV);
}

bool AMDGPUAsmPrinter::doFinalization(Module &M) {
  CallGraphResourceInfo.clear();
  return AsmPrinter::doFinalization(M);
}

// For the amdpal OS type, read the amdgpu.pal.metadata supplied by the
// frontend into our PALMetadataMap, ready for per-function modification.
It 262 // is a NamedMD containing an MDTuple containing a number of MDNodes each of 263 // which is an integer value, and each two integer values forms a key=value 264 // pair that we store as PALMetadataMap[key]=value in the map. 265 void AMDGPUAsmPrinter::readPALMetadata(Module &M) { 266 auto NamedMD = M.getNamedMetadata("amdgpu.pal.metadata"); 267 if (!NamedMD || !NamedMD->getNumOperands()) 268 return; 269 auto Tuple = dyn_cast<MDTuple>(NamedMD->getOperand(0)); 270 if (!Tuple) 271 return; 272 for (unsigned I = 0, E = Tuple->getNumOperands() & -2; I != E; I += 2) { 273 auto Key = mdconst::dyn_extract<ConstantInt>(Tuple->getOperand(I)); 274 auto Val = mdconst::dyn_extract<ConstantInt>(Tuple->getOperand(I + 1)); 275 if (!Key || !Val) 276 continue; 277 PALMetadataMap[Key->getZExtValue()] = Val->getZExtValue(); 278 } 279 } 280 281 // Print comments that apply to both callable functions and entry points. 282 void AMDGPUAsmPrinter::emitCommonFunctionComments( 283 uint32_t NumVGPR, 284 uint32_t NumSGPR, 285 uint64_t ScratchSize, 286 uint64_t CodeSize) { 287 OutStreamer->emitRawComment(" codeLenInByte = " + Twine(CodeSize), false); 288 OutStreamer->emitRawComment(" NumSgprs: " + Twine(NumSGPR), false); 289 OutStreamer->emitRawComment(" NumVgprs: " + Twine(NumVGPR), false); 290 OutStreamer->emitRawComment(" ScratchSize: " + Twine(ScratchSize), false); 291 } 292 293 bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) { 294 CurrentProgramInfo = SIProgramInfo(); 295 296 const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>(); 297 298 // The starting address of all shader programs must be 256 bytes aligned. 299 // Regular functions just need the basic required instruction alignment. 300 MF.setAlignment(MFI->isEntryFunction() ? 8 : 2); 301 302 SetupMachineFunction(MF); 303 304 const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>(); 305 MCContext &Context = getObjFileLowering().getContext(); 306 if (!STM.isAmdHsaOS()) { 307 MCSectionELF *ConfigSection = 308 Context.getELFSection(".AMDGPU.config", ELF::SHT_PROGBITS, 0); 309 OutStreamer->SwitchSection(ConfigSection); 310 } 311 312 if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) { 313 if (MFI->isEntryFunction()) { 314 getSIProgramInfo(CurrentProgramInfo, MF); 315 } else { 316 auto I = CallGraphResourceInfo.insert( 317 std::make_pair(&MF.getFunction(), SIFunctionResourceInfo())); 318 SIFunctionResourceInfo &Info = I.first->second; 319 assert(I.second && "should only be called once per function"); 320 Info = analyzeResourceUsage(MF); 321 } 322 323 if (STM.isAmdPalOS()) 324 EmitPALMetadata(MF, CurrentProgramInfo); 325 if (!STM.isAmdHsaOS()) { 326 EmitProgramInfoSI(MF, CurrentProgramInfo); 327 } 328 } else { 329 EmitProgramInfoR600(MF); 330 } 331 332 DisasmLines.clear(); 333 HexLines.clear(); 334 DisasmLineMaxLen = 0; 335 336 EmitFunctionBody(); 337 338 if (isVerbose()) { 339 MCSectionELF *CommentSection = 340 Context.getELFSection(".AMDGPU.csdata", ELF::SHT_PROGBITS, 0); 341 OutStreamer->SwitchSection(CommentSection); 342 343 if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) { 344 if (!MFI->isEntryFunction()) { 345 OutStreamer->emitRawComment(" Function info:", false); 346 SIFunctionResourceInfo &Info = CallGraphResourceInfo[&MF.getFunction()]; 347 emitCommonFunctionComments( 348 Info.NumVGPR, 349 Info.getTotalNumSGPRs(MF.getSubtarget<SISubtarget>()), 350 Info.PrivateSegmentSize, 351 getFunctionCodeSize(MF)); 352 return false; 353 } 354 355 OutStreamer->emitRawComment(" Kernel info:", false); 356 
emitCommonFunctionComments(CurrentProgramInfo.NumVGPR, 357 CurrentProgramInfo.NumSGPR, 358 CurrentProgramInfo.ScratchSize, 359 getFunctionCodeSize(MF)); 360 361 OutStreamer->emitRawComment( 362 " FloatMode: " + Twine(CurrentProgramInfo.FloatMode), false); 363 OutStreamer->emitRawComment( 364 " IeeeMode: " + Twine(CurrentProgramInfo.IEEEMode), false); 365 OutStreamer->emitRawComment( 366 " LDSByteSize: " + Twine(CurrentProgramInfo.LDSSize) + 367 " bytes/workgroup (compile time only)", false); 368 369 OutStreamer->emitRawComment( 370 " SGPRBlocks: " + Twine(CurrentProgramInfo.SGPRBlocks), false); 371 OutStreamer->emitRawComment( 372 " VGPRBlocks: " + Twine(CurrentProgramInfo.VGPRBlocks), false); 373 374 OutStreamer->emitRawComment( 375 " NumSGPRsForWavesPerEU: " + 376 Twine(CurrentProgramInfo.NumSGPRsForWavesPerEU), false); 377 OutStreamer->emitRawComment( 378 " NumVGPRsForWavesPerEU: " + 379 Twine(CurrentProgramInfo.NumVGPRsForWavesPerEU), false); 380 381 OutStreamer->emitRawComment( 382 " ReservedVGPRFirst: " + Twine(CurrentProgramInfo.ReservedVGPRFirst), 383 false); 384 OutStreamer->emitRawComment( 385 " ReservedVGPRCount: " + Twine(CurrentProgramInfo.ReservedVGPRCount), 386 false); 387 388 if (MF.getSubtarget<SISubtarget>().debuggerEmitPrologue()) { 389 OutStreamer->emitRawComment( 390 " DebuggerWavefrontPrivateSegmentOffsetSGPR: s" + 391 Twine(CurrentProgramInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR), false); 392 OutStreamer->emitRawComment( 393 " DebuggerPrivateSegmentBufferSGPR: s" + 394 Twine(CurrentProgramInfo.DebuggerPrivateSegmentBufferSGPR), false); 395 } 396 397 OutStreamer->emitRawComment( 398 " COMPUTE_PGM_RSRC2:USER_SGPR: " + 399 Twine(G_00B84C_USER_SGPR(CurrentProgramInfo.ComputePGMRSrc2)), false); 400 OutStreamer->emitRawComment( 401 " COMPUTE_PGM_RSRC2:TRAP_HANDLER: " + 402 Twine(G_00B84C_TRAP_HANDLER(CurrentProgramInfo.ComputePGMRSrc2)), false); 403 OutStreamer->emitRawComment( 404 " COMPUTE_PGM_RSRC2:TGID_X_EN: " + 405 Twine(G_00B84C_TGID_X_EN(CurrentProgramInfo.ComputePGMRSrc2)), false); 406 OutStreamer->emitRawComment( 407 " COMPUTE_PGM_RSRC2:TGID_Y_EN: " + 408 Twine(G_00B84C_TGID_Y_EN(CurrentProgramInfo.ComputePGMRSrc2)), false); 409 OutStreamer->emitRawComment( 410 " COMPUTE_PGM_RSRC2:TGID_Z_EN: " + 411 Twine(G_00B84C_TGID_Z_EN(CurrentProgramInfo.ComputePGMRSrc2)), false); 412 OutStreamer->emitRawComment( 413 " COMPUTE_PGM_RSRC2:TIDIG_COMP_CNT: " + 414 Twine(G_00B84C_TIDIG_COMP_CNT(CurrentProgramInfo.ComputePGMRSrc2)), 415 false); 416 } else { 417 R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>(); 418 OutStreamer->emitRawComment( 419 Twine("SQ_PGM_RESOURCES:STACK_SIZE = " + Twine(MFI->CFStackSize))); 420 } 421 } 422 423 if (STM.dumpCode()) { 424 425 OutStreamer->SwitchSection( 426 Context.getELFSection(".AMDGPU.disasm", ELF::SHT_NOTE, 0)); 427 428 for (size_t i = 0; i < DisasmLines.size(); ++i) { 429 std::string Comment = "\n"; 430 if (!HexLines[i].empty()) { 431 Comment = std::string(DisasmLineMaxLen - DisasmLines[i].size(), ' '); 432 Comment += " ; " + HexLines[i] + "\n"; 433 } 434 435 OutStreamer->EmitBytes(StringRef(DisasmLines[i])); 436 OutStreamer->EmitBytes(StringRef(Comment)); 437 } 438 } 439 440 return false; 441 } 442 443 void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) { 444 unsigned MaxGPR = 0; 445 bool killPixel = false; 446 const R600Subtarget &STM = MF.getSubtarget<R600Subtarget>(); 447 const R600RegisterInfo *RI = STM.getRegisterInfo(); 448 const R600MachineFunctionInfo *MFI = 
      MF.getInfo<R600MachineFunctionInfo>();

  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      if (MI.getOpcode() == AMDGPU::KILLGT)
        killPixel = true;
      unsigned numOperands = MI.getNumOperands();
      for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {
        const MachineOperand &MO = MI.getOperand(op_idx);
        if (!MO.isReg())
          continue;
        unsigned HWReg = RI->getHWRegIndex(MO.getReg());

        // Registers with a value > 127 aren't GPRs.
        if (HWReg > 127)
          continue;
        MaxGPR = std::max(MaxGPR, HWReg);
      }
    }
  }

  unsigned RsrcReg;
  if (STM.getGeneration() >= R600Subtarget::EVERGREEN) {
    // Evergreen / Northern Islands
    switch (MF.getFunction().getCallingConv()) {
    default: LLVM_FALLTHROUGH;
    case CallingConv::AMDGPU_CS: RsrcReg = R_0288D4_SQ_PGM_RESOURCES_LS; break;
    case CallingConv::AMDGPU_GS: RsrcReg = R_028878_SQ_PGM_RESOURCES_GS; break;
    case CallingConv::AMDGPU_PS: RsrcReg = R_028844_SQ_PGM_RESOURCES_PS; break;
    case CallingConv::AMDGPU_VS: RsrcReg = R_028860_SQ_PGM_RESOURCES_VS; break;
    }
  } else {
    // R600 / R700
    switch (MF.getFunction().getCallingConv()) {
    default: LLVM_FALLTHROUGH;
    case CallingConv::AMDGPU_GS: LLVM_FALLTHROUGH;
    case CallingConv::AMDGPU_CS: LLVM_FALLTHROUGH;
    case CallingConv::AMDGPU_VS: RsrcReg = R_028868_SQ_PGM_RESOURCES_VS; break;
    case CallingConv::AMDGPU_PS: RsrcReg = R_028850_SQ_PGM_RESOURCES_PS; break;
    }
  }

  OutStreamer->EmitIntValue(RsrcReg, 4);
  OutStreamer->EmitIntValue(S_NUM_GPRS(MaxGPR + 1) |
                            S_STACK_SIZE(MFI->CFStackSize), 4);
  OutStreamer->EmitIntValue(R_02880C_DB_SHADER_CONTROL, 4);
  OutStreamer->EmitIntValue(S_02880C_KILL_ENABLE(killPixel), 4);

  if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
    OutStreamer->EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4);
    OutStreamer->EmitIntValue(alignTo(MFI->getLDSSize(), 4) >> 2, 4);
  }
}

uint64_t AMDGPUAsmPrinter::getFunctionCodeSize(const MachineFunction &MF) const {
  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = STM.getInstrInfo();

  uint64_t CodeSize = 0;

  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      // TODO: CodeSize should account for multiple functions.

      // TODO: Should we count the size of debug info?
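      // Debug pseudo-instructions (e.g. DBG_VALUE) produce no machine code,
      // so they are skipped below and never contribute to the code size.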
513 if (MI.isDebugValue()) 514 continue; 515 516 CodeSize += TII->getInstSizeInBytes(MI); 517 } 518 } 519 520 return CodeSize; 521 } 522 523 static bool hasAnyNonFlatUseOfReg(const MachineRegisterInfo &MRI, 524 const SIInstrInfo &TII, 525 unsigned Reg) { 526 for (const MachineOperand &UseOp : MRI.reg_operands(Reg)) { 527 if (!UseOp.isImplicit() || !TII.isFLAT(*UseOp.getParent())) 528 return true; 529 } 530 531 return false; 532 } 533 534 static unsigned getNumExtraSGPRs(const SISubtarget &ST, 535 bool VCCUsed, 536 bool FlatScrUsed) { 537 unsigned ExtraSGPRs = 0; 538 if (VCCUsed) 539 ExtraSGPRs = 2; 540 541 if (ST.getGeneration() < SISubtarget::VOLCANIC_ISLANDS) { 542 if (FlatScrUsed) 543 ExtraSGPRs = 4; 544 } else { 545 if (ST.isXNACKEnabled()) 546 ExtraSGPRs = 4; 547 548 if (FlatScrUsed) 549 ExtraSGPRs = 6; 550 } 551 552 return ExtraSGPRs; 553 } 554 555 int32_t AMDGPUAsmPrinter::SIFunctionResourceInfo::getTotalNumSGPRs( 556 const SISubtarget &ST) const { 557 return NumExplicitSGPR + getNumExtraSGPRs(ST, UsesVCC, UsesFlatScratch); 558 } 559 560 AMDGPUAsmPrinter::SIFunctionResourceInfo AMDGPUAsmPrinter::analyzeResourceUsage( 561 const MachineFunction &MF) const { 562 SIFunctionResourceInfo Info; 563 564 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 565 const SISubtarget &ST = MF.getSubtarget<SISubtarget>(); 566 const MachineFrameInfo &FrameInfo = MF.getFrameInfo(); 567 const MachineRegisterInfo &MRI = MF.getRegInfo(); 568 const SIInstrInfo *TII = ST.getInstrInfo(); 569 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 570 571 Info.UsesFlatScratch = MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_LO) || 572 MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_HI); 573 574 // Even if FLAT_SCRATCH is implicitly used, it has no effect if flat 575 // instructions aren't used to access the scratch buffer. Inline assembly may 576 // need it though. 577 // 578 // If we only have implicit uses of flat_scr on flat instructions, it is not 579 // really needed. 580 if (Info.UsesFlatScratch && !MFI->hasFlatScratchInit() && 581 (!hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR) && 582 !hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_LO) && 583 !hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_HI))) { 584 Info.UsesFlatScratch = false; 585 } 586 587 Info.HasDynamicallySizedStack = FrameInfo.hasVarSizedObjects(); 588 Info.PrivateSegmentSize = FrameInfo.getStackSize(); 589 590 591 Info.UsesVCC = MRI.isPhysRegUsed(AMDGPU::VCC_LO) || 592 MRI.isPhysRegUsed(AMDGPU::VCC_HI); 593 594 // If there are no calls, MachineRegisterInfo can tell us the used register 595 // count easily. 596 // A tail call isn't considered a call for MachineFrameInfo's purposes. 597 if (!FrameInfo.hasCalls() && !FrameInfo.hasTailCall()) { 598 MCPhysReg HighestVGPRReg = AMDGPU::NoRegister; 599 for (MCPhysReg Reg : reverse(AMDGPU::VGPR_32RegClass.getRegisters())) { 600 if (MRI.isPhysRegUsed(Reg)) { 601 HighestVGPRReg = Reg; 602 break; 603 } 604 } 605 606 MCPhysReg HighestSGPRReg = AMDGPU::NoRegister; 607 for (MCPhysReg Reg : reverse(AMDGPU::SGPR_32RegClass.getRegisters())) { 608 if (MRI.isPhysRegUsed(Reg)) { 609 HighestSGPRReg = Reg; 610 break; 611 } 612 } 613 614 // We found the maximum register index. They start at 0, so add one to get the 615 // number of registers. 616 Info.NumVGPR = HighestVGPRReg == AMDGPU::NoRegister ? 0 : 617 TRI.getHWRegIndex(HighestVGPRReg) + 1; 618 Info.NumExplicitSGPR = HighestSGPRReg == AMDGPU::NoRegister ? 
0 : 619 TRI.getHWRegIndex(HighestSGPRReg) + 1; 620 621 return Info; 622 } 623 624 int32_t MaxVGPR = -1; 625 int32_t MaxSGPR = -1; 626 uint64_t CalleeFrameSize = 0; 627 628 for (const MachineBasicBlock &MBB : MF) { 629 for (const MachineInstr &MI : MBB) { 630 // TODO: Check regmasks? Do they occur anywhere except calls? 631 for (const MachineOperand &MO : MI.operands()) { 632 unsigned Width = 0; 633 bool IsSGPR = false; 634 635 if (!MO.isReg()) 636 continue; 637 638 unsigned Reg = MO.getReg(); 639 switch (Reg) { 640 case AMDGPU::EXEC: 641 case AMDGPU::EXEC_LO: 642 case AMDGPU::EXEC_HI: 643 case AMDGPU::SCC: 644 case AMDGPU::M0: 645 case AMDGPU::SRC_SHARED_BASE: 646 case AMDGPU::SRC_SHARED_LIMIT: 647 case AMDGPU::SRC_PRIVATE_BASE: 648 case AMDGPU::SRC_PRIVATE_LIMIT: 649 continue; 650 651 case AMDGPU::NoRegister: 652 assert(MI.isDebugValue()); 653 continue; 654 655 case AMDGPU::VCC: 656 case AMDGPU::VCC_LO: 657 case AMDGPU::VCC_HI: 658 Info.UsesVCC = true; 659 continue; 660 661 case AMDGPU::FLAT_SCR: 662 case AMDGPU::FLAT_SCR_LO: 663 case AMDGPU::FLAT_SCR_HI: 664 continue; 665 666 case AMDGPU::XNACK_MASK: 667 case AMDGPU::XNACK_MASK_LO: 668 case AMDGPU::XNACK_MASK_HI: 669 llvm_unreachable("xnack_mask registers should not be used"); 670 671 case AMDGPU::TBA: 672 case AMDGPU::TBA_LO: 673 case AMDGPU::TBA_HI: 674 case AMDGPU::TMA: 675 case AMDGPU::TMA_LO: 676 case AMDGPU::TMA_HI: 677 llvm_unreachable("trap handler registers should not be used"); 678 679 default: 680 break; 681 } 682 683 if (AMDGPU::SReg_32RegClass.contains(Reg)) { 684 assert(!AMDGPU::TTMP_32RegClass.contains(Reg) && 685 "trap handler registers should not be used"); 686 IsSGPR = true; 687 Width = 1; 688 } else if (AMDGPU::VGPR_32RegClass.contains(Reg)) { 689 IsSGPR = false; 690 Width = 1; 691 } else if (AMDGPU::SReg_64RegClass.contains(Reg)) { 692 assert(!AMDGPU::TTMP_64RegClass.contains(Reg) && 693 "trap handler registers should not be used"); 694 IsSGPR = true; 695 Width = 2; 696 } else if (AMDGPU::VReg_64RegClass.contains(Reg)) { 697 IsSGPR = false; 698 Width = 2; 699 } else if (AMDGPU::VReg_96RegClass.contains(Reg)) { 700 IsSGPR = false; 701 Width = 3; 702 } else if (AMDGPU::SReg_128RegClass.contains(Reg)) { 703 assert(!AMDGPU::TTMP_128RegClass.contains(Reg) && 704 "trap handler registers should not be used"); 705 IsSGPR = true; 706 Width = 4; 707 } else if (AMDGPU::VReg_128RegClass.contains(Reg)) { 708 IsSGPR = false; 709 Width = 4; 710 } else if (AMDGPU::SReg_256RegClass.contains(Reg)) { 711 assert(!AMDGPU::TTMP_256RegClass.contains(Reg) && 712 "trap handler registers should not be used"); 713 IsSGPR = true; 714 Width = 8; 715 } else if (AMDGPU::VReg_256RegClass.contains(Reg)) { 716 IsSGPR = false; 717 Width = 8; 718 } else if (AMDGPU::SReg_512RegClass.contains(Reg)) { 719 assert(!AMDGPU::TTMP_512RegClass.contains(Reg) && 720 "trap handler registers should not be used"); 721 IsSGPR = true; 722 Width = 16; 723 } else if (AMDGPU::VReg_512RegClass.contains(Reg)) { 724 IsSGPR = false; 725 Width = 16; 726 } else { 727 llvm_unreachable("Unknown register class"); 728 } 729 unsigned HWReg = TRI.getHWRegIndex(Reg); 730 int MaxUsed = HWReg + Width - 1; 731 if (IsSGPR) { 732 MaxSGPR = MaxUsed > MaxSGPR ? MaxUsed : MaxSGPR; 733 } else { 734 MaxVGPR = MaxUsed > MaxVGPR ? MaxUsed : MaxVGPR; 735 } 736 } 737 738 if (MI.isCall()) { 739 // Pseudo used just to encode the underlying global. Is there a better 740 // way to track this? 
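        // The callee is carried as a global-address operand on the call
        // pseudo; resolve it to a Function so its (precomputed or estimated)
        // resource usage can be folded into this function's totals.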
741 742 const MachineOperand *CalleeOp 743 = TII->getNamedOperand(MI, AMDGPU::OpName::callee); 744 const Function *Callee = cast<Function>(CalleeOp->getGlobal()); 745 if (Callee->isDeclaration()) { 746 // If this is a call to an external function, we can't do much. Make 747 // conservative guesses. 748 749 // 48 SGPRs - vcc, - flat_scr, -xnack 750 int MaxSGPRGuess = 47 - getNumExtraSGPRs(ST, true, 751 ST.hasFlatAddressSpace()); 752 MaxSGPR = std::max(MaxSGPR, MaxSGPRGuess); 753 MaxVGPR = std::max(MaxVGPR, 23); 754 755 CalleeFrameSize = std::max(CalleeFrameSize, UINT64_C(16384)); 756 Info.UsesVCC = true; 757 Info.UsesFlatScratch = ST.hasFlatAddressSpace(); 758 Info.HasDynamicallySizedStack = true; 759 } else { 760 // We force CodeGen to run in SCC order, so the callee's register 761 // usage etc. should be the cumulative usage of all callees. 762 auto I = CallGraphResourceInfo.find(Callee); 763 assert(I != CallGraphResourceInfo.end() && 764 "callee should have been handled before caller"); 765 766 MaxSGPR = std::max(I->second.NumExplicitSGPR - 1, MaxSGPR); 767 MaxVGPR = std::max(I->second.NumVGPR - 1, MaxVGPR); 768 CalleeFrameSize 769 = std::max(I->second.PrivateSegmentSize, CalleeFrameSize); 770 Info.UsesVCC |= I->second.UsesVCC; 771 Info.UsesFlatScratch |= I->second.UsesFlatScratch; 772 Info.HasDynamicallySizedStack |= I->second.HasDynamicallySizedStack; 773 Info.HasRecursion |= I->second.HasRecursion; 774 } 775 776 if (!Callee->doesNotRecurse()) 777 Info.HasRecursion = true; 778 } 779 } 780 } 781 782 Info.NumExplicitSGPR = MaxSGPR + 1; 783 Info.NumVGPR = MaxVGPR + 1; 784 Info.PrivateSegmentSize += CalleeFrameSize; 785 786 return Info; 787 } 788 789 void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo, 790 const MachineFunction &MF) { 791 SIFunctionResourceInfo Info = analyzeResourceUsage(MF); 792 793 ProgInfo.NumVGPR = Info.NumVGPR; 794 ProgInfo.NumSGPR = Info.NumExplicitSGPR; 795 ProgInfo.ScratchSize = Info.PrivateSegmentSize; 796 ProgInfo.VCCUsed = Info.UsesVCC; 797 ProgInfo.FlatUsed = Info.UsesFlatScratch; 798 ProgInfo.DynamicCallStack = Info.HasDynamicallySizedStack || Info.HasRecursion; 799 800 if (!isUInt<32>(ProgInfo.ScratchSize)) { 801 DiagnosticInfoStackSize DiagStackSize(MF.getFunction(), 802 ProgInfo.ScratchSize, DS_Error); 803 MF.getFunction().getContext().diagnose(DiagStackSize); 804 } 805 806 const SISubtarget &STM = MF.getSubtarget<SISubtarget>(); 807 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 808 const SIInstrInfo *TII = STM.getInstrInfo(); 809 const SIRegisterInfo *RI = &TII->getRegisterInfo(); 810 811 unsigned ExtraSGPRs = getNumExtraSGPRs(STM, 812 ProgInfo.VCCUsed, 813 ProgInfo.FlatUsed); 814 unsigned ExtraVGPRs = STM.getReservedNumVGPRs(MF); 815 816 // Check the addressable register limit before we add ExtraSGPRs. 817 if (STM.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS && 818 !STM.hasSGPRInitBug()) { 819 unsigned MaxAddressableNumSGPRs = STM.getAddressableNumSGPRs(); 820 if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) { 821 // This can happen due to a compiler bug or when using inline asm. 822 LLVMContext &Ctx = MF.getFunction().getContext(); 823 DiagnosticInfoResourceLimit Diag(MF.getFunction(), 824 "addressable scalar registers", 825 ProgInfo.NumSGPR, DS_Error, 826 DK_ResourceLimit, 827 MaxAddressableNumSGPRs); 828 Ctx.diagnose(Diag); 829 ProgInfo.NumSGPR = MaxAddressableNumSGPRs - 1; 830 } 831 } 832 833 // Account for extra SGPRs and VGPRs reserved for debugger use. 
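  // ExtraSGPRs covers vcc, flat_scratch and (where enabled) xnack_mask;
  // ExtraVGPRs covers the VGPRs reserved for the debugger, if any.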
  ProgInfo.NumSGPR += ExtraSGPRs;
  ProgInfo.NumVGPR += ExtraVGPRs;

  // Adjust number of registers used to meet default/requested minimum/maximum
  // number of waves per execution unit request.
  ProgInfo.NumSGPRsForWavesPerEU = std::max(
    std::max(ProgInfo.NumSGPR, 1u), STM.getMinNumSGPRs(MFI->getMaxWavesPerEU()));
  ProgInfo.NumVGPRsForWavesPerEU = std::max(
    std::max(ProgInfo.NumVGPR, 1u), STM.getMinNumVGPRs(MFI->getMaxWavesPerEU()));

  if (STM.getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS ||
      STM.hasSGPRInitBug()) {
    unsigned MaxAddressableNumSGPRs = STM.getAddressableNumSGPRs();
    if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) {
      // This can happen due to a compiler bug or when using inline asm to use
      // the registers which are usually reserved for vcc etc.
      LLVMContext &Ctx = MF.getFunction().getContext();
      DiagnosticInfoResourceLimit Diag(MF.getFunction(),
                                       "scalar registers",
                                       ProgInfo.NumSGPR, DS_Error,
                                       DK_ResourceLimit,
                                       MaxAddressableNumSGPRs);
      Ctx.diagnose(Diag);
      ProgInfo.NumSGPR = MaxAddressableNumSGPRs;
      ProgInfo.NumSGPRsForWavesPerEU = MaxAddressableNumSGPRs;
    }
  }

  if (STM.hasSGPRInitBug()) {
    ProgInfo.NumSGPR =
        AMDGPU::IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;
    ProgInfo.NumSGPRsForWavesPerEU =
        AMDGPU::IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;
  }

  if (MFI->getNumUserSGPRs() > STM.getMaxNumUserSGPRs()) {
    LLVMContext &Ctx = MF.getFunction().getContext();
    DiagnosticInfoResourceLimit Diag(MF.getFunction(), "user SGPRs",
                                     MFI->getNumUserSGPRs(), DS_Error);
    Ctx.diagnose(Diag);
  }

  if (MFI->getLDSSize() > static_cast<unsigned>(STM.getLocalMemorySize())) {
    LLVMContext &Ctx = MF.getFunction().getContext();
    DiagnosticInfoResourceLimit Diag(MF.getFunction(), "local memory",
                                     MFI->getLDSSize(), DS_Error);
    Ctx.diagnose(Diag);
  }

  // SGPRBlocks is the actual number of SGPR blocks minus 1.
  ProgInfo.SGPRBlocks = alignTo(ProgInfo.NumSGPRsForWavesPerEU,
                                STM.getSGPREncodingGranule());
  ProgInfo.SGPRBlocks = ProgInfo.SGPRBlocks / STM.getSGPREncodingGranule() - 1;

  // VGPRBlocks is the actual number of VGPR blocks minus 1.
  ProgInfo.VGPRBlocks = alignTo(ProgInfo.NumVGPRsForWavesPerEU,
                                STM.getVGPREncodingGranule());
  ProgInfo.VGPRBlocks = ProgInfo.VGPRBlocks / STM.getVGPREncodingGranule() - 1;

  // Record first reserved VGPR and number of reserved VGPRs.
  ProgInfo.ReservedVGPRFirst = STM.debuggerReserveRegs() ? ProgInfo.NumVGPR : 0;
  ProgInfo.ReservedVGPRCount = STM.getReservedNumVGPRs(MF);

  // Update DebuggerWavefrontPrivateSegmentOffsetSGPR and
  // DebuggerPrivateSegmentBufferSGPR fields if "amdgpu-debugger-emit-prologue"
  // attribute was requested.
  if (STM.debuggerEmitPrologue()) {
    ProgInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR =
      RI->getHWRegIndex(MFI->getScratchWaveOffsetReg());
    ProgInfo.DebuggerPrivateSegmentBufferSGPR =
      RI->getHWRegIndex(MFI->getScratchRSrcReg());
  }

  // Set the value to initialize FP_ROUND and FP_DENORM parts of the mode
  // register.
  ProgInfo.FloatMode = getFPMode(MF);

  ProgInfo.IEEEMode = STM.enableIEEEBit(MF);

  // Make the clamp modifier return 0 on NaN input.
  ProgInfo.DX10Clamp = STM.enableDX10Clamp();

  unsigned LDSAlignShift;
  if (STM.getGeneration() < SISubtarget::SEA_ISLANDS) {
    // LDS is allocated in 64 dword blocks.
    LDSAlignShift = 8;
  } else {
    // LDS is allocated in 128 dword blocks.
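    // 128 dwords = 512 bytes = 2^9, hence a shift of 9; the earlier
    // generation case above uses 64 dwords = 256 bytes = 2^8.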
922 LDSAlignShift = 9; 923 } 924 925 unsigned LDSSpillSize = 926 MFI->getLDSWaveSpillSize() * MFI->getMaxFlatWorkGroupSize(); 927 928 ProgInfo.LDSSize = MFI->getLDSSize() + LDSSpillSize; 929 ProgInfo.LDSBlocks = 930 alignTo(ProgInfo.LDSSize, 1ULL << LDSAlignShift) >> LDSAlignShift; 931 932 // Scratch is allocated in 256 dword blocks. 933 unsigned ScratchAlignShift = 10; 934 // We need to program the hardware with the amount of scratch memory that 935 // is used by the entire wave. ProgInfo.ScratchSize is the amount of 936 // scratch memory used per thread. 937 ProgInfo.ScratchBlocks = 938 alignTo(ProgInfo.ScratchSize * STM.getWavefrontSize(), 939 1ULL << ScratchAlignShift) >> 940 ScratchAlignShift; 941 942 ProgInfo.ComputePGMRSrc1 = 943 S_00B848_VGPRS(ProgInfo.VGPRBlocks) | 944 S_00B848_SGPRS(ProgInfo.SGPRBlocks) | 945 S_00B848_PRIORITY(ProgInfo.Priority) | 946 S_00B848_FLOAT_MODE(ProgInfo.FloatMode) | 947 S_00B848_PRIV(ProgInfo.Priv) | 948 S_00B848_DX10_CLAMP(ProgInfo.DX10Clamp) | 949 S_00B848_DEBUG_MODE(ProgInfo.DebugMode) | 950 S_00B848_IEEE_MODE(ProgInfo.IEEEMode); 951 952 // 0 = X, 1 = XY, 2 = XYZ 953 unsigned TIDIGCompCnt = 0; 954 if (MFI->hasWorkItemIDZ()) 955 TIDIGCompCnt = 2; 956 else if (MFI->hasWorkItemIDY()) 957 TIDIGCompCnt = 1; 958 959 ProgInfo.ComputePGMRSrc2 = 960 S_00B84C_SCRATCH_EN(ProgInfo.ScratchBlocks > 0) | 961 S_00B84C_USER_SGPR(MFI->getNumUserSGPRs()) | 962 S_00B84C_TRAP_HANDLER(STM.isTrapHandlerEnabled()) | 963 S_00B84C_TGID_X_EN(MFI->hasWorkGroupIDX()) | 964 S_00B84C_TGID_Y_EN(MFI->hasWorkGroupIDY()) | 965 S_00B84C_TGID_Z_EN(MFI->hasWorkGroupIDZ()) | 966 S_00B84C_TG_SIZE_EN(MFI->hasWorkGroupInfo()) | 967 S_00B84C_TIDIG_COMP_CNT(TIDIGCompCnt) | 968 S_00B84C_EXCP_EN_MSB(0) | 969 // For AMDHSA, LDS_SIZE must be zero, as it is populated by the CP. 970 S_00B84C_LDS_SIZE(STM.isAmdHsaOS() ? 0 : ProgInfo.LDSBlocks) | 971 S_00B84C_EXCP_EN(0); 972 } 973 974 static unsigned getRsrcReg(CallingConv::ID CallConv) { 975 switch (CallConv) { 976 default: LLVM_FALLTHROUGH; 977 case CallingConv::AMDGPU_CS: return R_00B848_COMPUTE_PGM_RSRC1; 978 case CallingConv::AMDGPU_LS: return R_00B528_SPI_SHADER_PGM_RSRC1_LS; 979 case CallingConv::AMDGPU_HS: return R_00B428_SPI_SHADER_PGM_RSRC1_HS; 980 case CallingConv::AMDGPU_ES: return R_00B328_SPI_SHADER_PGM_RSRC1_ES; 981 case CallingConv::AMDGPU_GS: return R_00B228_SPI_SHADER_PGM_RSRC1_GS; 982 case CallingConv::AMDGPU_VS: return R_00B128_SPI_SHADER_PGM_RSRC1_VS; 983 case CallingConv::AMDGPU_PS: return R_00B028_SPI_SHADER_PGM_RSRC1_PS; 984 } 985 } 986 987 void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF, 988 const SIProgramInfo &CurrentProgramInfo) { 989 const SISubtarget &STM = MF.getSubtarget<SISubtarget>(); 990 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 991 unsigned RsrcReg = getRsrcReg(MF.getFunction().getCallingConv()); 992 993 if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) { 994 OutStreamer->EmitIntValue(R_00B848_COMPUTE_PGM_RSRC1, 4); 995 996 OutStreamer->EmitIntValue(CurrentProgramInfo.ComputePGMRSrc1, 4); 997 998 OutStreamer->EmitIntValue(R_00B84C_COMPUTE_PGM_RSRC2, 4); 999 OutStreamer->EmitIntValue(CurrentProgramInfo.ComputePGMRSrc2, 4); 1000 1001 OutStreamer->EmitIntValue(R_00B860_COMPUTE_TMPRING_SIZE, 4); 1002 OutStreamer->EmitIntValue(S_00B860_WAVESIZE(CurrentProgramInfo.ScratchBlocks), 4); 1003 1004 // TODO: Should probably note flat usage somewhere. SC emits a "FlatPtr32 = 1005 // 0" comment but I don't see a corresponding field in the register spec. 
1006 } else { 1007 OutStreamer->EmitIntValue(RsrcReg, 4); 1008 OutStreamer->EmitIntValue(S_00B028_VGPRS(CurrentProgramInfo.VGPRBlocks) | 1009 S_00B028_SGPRS(CurrentProgramInfo.SGPRBlocks), 4); 1010 unsigned Rsrc2Val = 0; 1011 if (STM.isVGPRSpillingEnabled(MF.getFunction())) { 1012 OutStreamer->EmitIntValue(R_0286E8_SPI_TMPRING_SIZE, 4); 1013 OutStreamer->EmitIntValue(S_0286E8_WAVESIZE(CurrentProgramInfo.ScratchBlocks), 4); 1014 if (TM.getTargetTriple().getOS() == Triple::AMDPAL) 1015 Rsrc2Val = S_00B84C_SCRATCH_EN(CurrentProgramInfo.ScratchBlocks > 0); 1016 } 1017 if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS) { 1018 OutStreamer->EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4); 1019 OutStreamer->EmitIntValue(MFI->getPSInputEnable(), 4); 1020 OutStreamer->EmitIntValue(R_0286D0_SPI_PS_INPUT_ADDR, 4); 1021 OutStreamer->EmitIntValue(MFI->getPSInputAddr(), 4); 1022 Rsrc2Val |= S_00B02C_EXTRA_LDS_SIZE(CurrentProgramInfo.LDSBlocks); 1023 } 1024 if (Rsrc2Val) { 1025 OutStreamer->EmitIntValue(RsrcReg + 4 /*rsrc2*/, 4); 1026 OutStreamer->EmitIntValue(Rsrc2Val, 4); 1027 } 1028 } 1029 1030 OutStreamer->EmitIntValue(R_SPILLED_SGPRS, 4); 1031 OutStreamer->EmitIntValue(MFI->getNumSpilledSGPRs(), 4); 1032 OutStreamer->EmitIntValue(R_SPILLED_VGPRS, 4); 1033 OutStreamer->EmitIntValue(MFI->getNumSpilledVGPRs(), 4); 1034 } 1035 1036 // This is the equivalent of EmitProgramInfoSI above, but for when the OS type 1037 // is AMDPAL. It stores each compute/SPI register setting and other PAL 1038 // metadata items into the PALMetadataMap, combining with any provided by the 1039 // frontend as LLVM metadata. Once all functions are written, PALMetadataMap is 1040 // then written as a single block in the .note section. 1041 void AMDGPUAsmPrinter::EmitPALMetadata(const MachineFunction &MF, 1042 const SIProgramInfo &CurrentProgramInfo) { 1043 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1044 // Given the calling convention, calculate the register number for rsrc1. In 1045 // principle the register number could change in future hardware, but we know 1046 // it is the same for gfx6-9 (except that LS and ES don't exist on gfx9), so 1047 // we can use the same fixed value that .AMDGPU.config has for Mesa. Note 1048 // that we use a register number rather than a byte offset, so we need to 1049 // divide by 4. 1050 unsigned Rsrc1Reg = getRsrcReg(MF.getFunction().getCallingConv()) / 4; 1051 unsigned Rsrc2Reg = Rsrc1Reg + 1; 1052 // Also calculate the PAL metadata key for *S_SCRATCH_SIZE. It can be used 1053 // with a constant offset to access any non-register shader-specific PAL 1054 // metadata key. 
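  // The per-stage PAL metadata keys are laid out at consistent offsets from
  // one another, so the NumUsedVgprs/NumUsedSgprs keys below can be derived
  // from the stage's ScratchSize key plus a delta computed from the VS keys.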
1055 unsigned ScratchSizeKey = PALMD::Key::CS_SCRATCH_SIZE; 1056 switch (MF.getFunction().getCallingConv()) { 1057 case CallingConv::AMDGPU_PS: 1058 ScratchSizeKey = PALMD::Key::PS_SCRATCH_SIZE; 1059 break; 1060 case CallingConv::AMDGPU_VS: 1061 ScratchSizeKey = PALMD::Key::VS_SCRATCH_SIZE; 1062 break; 1063 case CallingConv::AMDGPU_GS: 1064 ScratchSizeKey = PALMD::Key::GS_SCRATCH_SIZE; 1065 break; 1066 case CallingConv::AMDGPU_ES: 1067 ScratchSizeKey = PALMD::Key::ES_SCRATCH_SIZE; 1068 break; 1069 case CallingConv::AMDGPU_HS: 1070 ScratchSizeKey = PALMD::Key::HS_SCRATCH_SIZE; 1071 break; 1072 case CallingConv::AMDGPU_LS: 1073 ScratchSizeKey = PALMD::Key::LS_SCRATCH_SIZE; 1074 break; 1075 } 1076 unsigned NumUsedVgprsKey = ScratchSizeKey + 1077 PALMD::Key::VS_NUM_USED_VGPRS - PALMD::Key::VS_SCRATCH_SIZE; 1078 unsigned NumUsedSgprsKey = ScratchSizeKey + 1079 PALMD::Key::VS_NUM_USED_SGPRS - PALMD::Key::VS_SCRATCH_SIZE; 1080 PALMetadataMap[NumUsedVgprsKey] = CurrentProgramInfo.NumVGPRsForWavesPerEU; 1081 PALMetadataMap[NumUsedSgprsKey] = CurrentProgramInfo.NumSGPRsForWavesPerEU; 1082 if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) { 1083 PALMetadataMap[Rsrc1Reg] |= CurrentProgramInfo.ComputePGMRSrc1; 1084 PALMetadataMap[Rsrc2Reg] |= CurrentProgramInfo.ComputePGMRSrc2; 1085 // ScratchSize is in bytes, 16 aligned. 1086 PALMetadataMap[ScratchSizeKey] |= 1087 alignTo(CurrentProgramInfo.ScratchSize, 16); 1088 } else { 1089 PALMetadataMap[Rsrc1Reg] |= S_00B028_VGPRS(CurrentProgramInfo.VGPRBlocks) | 1090 S_00B028_SGPRS(CurrentProgramInfo.SGPRBlocks); 1091 if (CurrentProgramInfo.ScratchBlocks > 0) 1092 PALMetadataMap[Rsrc2Reg] |= S_00B84C_SCRATCH_EN(1); 1093 // ScratchSize is in bytes, 16 aligned. 1094 PALMetadataMap[ScratchSizeKey] |= 1095 alignTo(CurrentProgramInfo.ScratchSize, 16); 1096 } 1097 if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS) { 1098 PALMetadataMap[Rsrc2Reg] |= 1099 S_00B02C_EXTRA_LDS_SIZE(CurrentProgramInfo.LDSBlocks); 1100 PALMetadataMap[R_0286CC_SPI_PS_INPUT_ENA / 4] |= MFI->getPSInputEnable(); 1101 PALMetadataMap[R_0286D0_SPI_PS_INPUT_ADDR / 4] |= MFI->getPSInputAddr(); 1102 } 1103 } 1104 1105 // This is supposed to be log2(Size) 1106 static amd_element_byte_size_t getElementByteSizeValue(unsigned Size) { 1107 switch (Size) { 1108 case 4: 1109 return AMD_ELEMENT_4_BYTES; 1110 case 8: 1111 return AMD_ELEMENT_8_BYTES; 1112 case 16: 1113 return AMD_ELEMENT_16_BYTES; 1114 default: 1115 llvm_unreachable("invalid private_element_size"); 1116 } 1117 } 1118 1119 void AMDGPUAsmPrinter::getAmdKernelCode(amd_kernel_code_t &Out, 1120 const SIProgramInfo &CurrentProgramInfo, 1121 const MachineFunction &MF) const { 1122 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1123 const SISubtarget &STM = MF.getSubtarget<SISubtarget>(); 1124 1125 AMDGPU::initDefaultAMDKernelCodeT(Out, STM.getFeatureBits()); 1126 1127 Out.compute_pgm_resource_registers = 1128 CurrentProgramInfo.ComputePGMRSrc1 | 1129 (CurrentProgramInfo.ComputePGMRSrc2 << 32); 1130 Out.code_properties = AMD_CODE_PROPERTY_IS_PTR64; 1131 1132 if (CurrentProgramInfo.DynamicCallStack) 1133 Out.code_properties |= AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK; 1134 1135 AMD_HSA_BITS_SET(Out.code_properties, 1136 AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE, 1137 getElementByteSizeValue(STM.getMaxPrivateElementSize())); 1138 1139 if (MFI->hasPrivateSegmentBuffer()) { 1140 Out.code_properties |= 1141 AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER; 1142 } 1143 1144 if (MFI->hasDispatchPtr()) 1145 
Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR; 1146 1147 if (MFI->hasQueuePtr()) 1148 Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR; 1149 1150 if (MFI->hasKernargSegmentPtr()) 1151 Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR; 1152 1153 if (MFI->hasDispatchID()) 1154 Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID; 1155 1156 if (MFI->hasFlatScratchInit()) 1157 Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT; 1158 1159 if (MFI->hasGridWorkgroupCountX()) { 1160 Out.code_properties |= 1161 AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X; 1162 } 1163 1164 if (MFI->hasGridWorkgroupCountY()) { 1165 Out.code_properties |= 1166 AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y; 1167 } 1168 1169 if (MFI->hasGridWorkgroupCountZ()) { 1170 Out.code_properties |= 1171 AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z; 1172 } 1173 1174 if (MFI->hasDispatchPtr()) 1175 Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR; 1176 1177 if (STM.debuggerSupported()) 1178 Out.code_properties |= AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED; 1179 1180 if (STM.isXNACKEnabled()) 1181 Out.code_properties |= AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED; 1182 1183 // FIXME: Should use getKernArgSize 1184 Out.kernarg_segment_byte_size = 1185 STM.getKernArgSegmentSize(MF, MFI->getABIArgOffset()); 1186 Out.wavefront_sgpr_count = CurrentProgramInfo.NumSGPR; 1187 Out.workitem_vgpr_count = CurrentProgramInfo.NumVGPR; 1188 Out.workitem_private_segment_byte_size = CurrentProgramInfo.ScratchSize; 1189 Out.workgroup_group_segment_byte_size = CurrentProgramInfo.LDSSize; 1190 Out.reserved_vgpr_first = CurrentProgramInfo.ReservedVGPRFirst; 1191 Out.reserved_vgpr_count = CurrentProgramInfo.ReservedVGPRCount; 1192 1193 // These alignment values are specified in powers of two, so alignment = 1194 // 2^n. The minimum alignment is 2^4 = 16. 
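  // For example, a maximum kernarg alignment of 64 bytes has 6 trailing zero
  // bits, so n = 6 (2^6 = 64); alignments below 16 bytes are clamped up to
  // the minimum of n = 4.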
1195 Out.kernarg_segment_alignment = std::max((size_t)4, 1196 countTrailingZeros(MFI->getMaxKernArgAlign())); 1197 1198 if (STM.debuggerEmitPrologue()) { 1199 Out.debug_wavefront_private_segment_offset_sgpr = 1200 CurrentProgramInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR; 1201 Out.debug_private_segment_buffer_sgpr = 1202 CurrentProgramInfo.DebuggerPrivateSegmentBufferSGPR; 1203 } 1204 } 1205 1206 AMDGPU::HSAMD::Kernel::CodeProps::Metadata AMDGPUAsmPrinter::getHSACodeProps( 1207 const MachineFunction &MF, 1208 const SIProgramInfo &ProgramInfo) const { 1209 const SISubtarget &STM = MF.getSubtarget<SISubtarget>(); 1210 const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>(); 1211 HSAMD::Kernel::CodeProps::Metadata HSACodeProps; 1212 1213 HSACodeProps.mKernargSegmentSize = 1214 STM.getKernArgSegmentSize(MF, MFI.getABIArgOffset()); 1215 HSACodeProps.mGroupSegmentFixedSize = ProgramInfo.LDSSize; 1216 HSACodeProps.mPrivateSegmentFixedSize = ProgramInfo.ScratchSize; 1217 HSACodeProps.mKernargSegmentAlign = 1218 std::max(uint32_t(4), MFI.getMaxKernArgAlign()); 1219 HSACodeProps.mWavefrontSize = STM.getWavefrontSize(); 1220 HSACodeProps.mNumSGPRs = CurrentProgramInfo.NumSGPR; 1221 HSACodeProps.mNumVGPRs = CurrentProgramInfo.NumVGPR; 1222 HSACodeProps.mMaxFlatWorkGroupSize = MFI.getMaxFlatWorkGroupSize(); 1223 HSACodeProps.mIsDynamicCallStack = ProgramInfo.DynamicCallStack; 1224 HSACodeProps.mIsXNACKEnabled = STM.isXNACKEnabled(); 1225 HSACodeProps.mNumSpilledSGPRs = MFI.getNumSpilledSGPRs(); 1226 HSACodeProps.mNumSpilledVGPRs = MFI.getNumSpilledVGPRs(); 1227 1228 return HSACodeProps; 1229 } 1230 1231 AMDGPU::HSAMD::Kernel::DebugProps::Metadata AMDGPUAsmPrinter::getHSADebugProps( 1232 const MachineFunction &MF, 1233 const SIProgramInfo &ProgramInfo) const { 1234 const SISubtarget &STM = MF.getSubtarget<SISubtarget>(); 1235 HSAMD::Kernel::DebugProps::Metadata HSADebugProps; 1236 1237 if (!STM.debuggerSupported()) 1238 return HSADebugProps; 1239 1240 HSADebugProps.mDebuggerABIVersion.push_back(1); 1241 HSADebugProps.mDebuggerABIVersion.push_back(0); 1242 HSADebugProps.mReservedNumVGPRs = ProgramInfo.ReservedVGPRCount; 1243 HSADebugProps.mReservedFirstVGPR = ProgramInfo.ReservedVGPRFirst; 1244 1245 if (STM.debuggerEmitPrologue()) { 1246 HSADebugProps.mPrivateSegmentBufferSGPR = 1247 ProgramInfo.DebuggerPrivateSegmentBufferSGPR; 1248 HSADebugProps.mWavefrontPrivateSegmentOffsetSGPR = 1249 ProgramInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR; 1250 } 1251 1252 return HSADebugProps; 1253 } 1254 1255 bool AMDGPUAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, 1256 unsigned AsmVariant, 1257 const char *ExtraCode, raw_ostream &O) { 1258 // First try the generic code, which knows about modifiers like 'c' and 'n'. 1259 if (!AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O)) 1260 return false; 1261 1262 if (ExtraCode && ExtraCode[0]) { 1263 if (ExtraCode[1] != 0) 1264 return true; // Unknown modifier. 1265 1266 switch (ExtraCode[0]) { 1267 case 'r': 1268 break; 1269 default: 1270 return true; 1271 } 1272 } 1273 1274 // TODO: Should be able to support other operand types like globals. 1275 const MachineOperand &MO = MI->getOperand(OpNo); 1276 if (MO.isReg()) { 1277 AMDGPUInstPrinter::printRegOperand(MO.getReg(), O, 1278 *MF->getSubtarget().getRegisterInfo()); 1279 return false; 1280 } 1281 1282 return true; 1283 } 1284