//===-- AMDGPUAsmPrinter.cpp - AMDGPU assembly printer --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// The AMDGPUAsmPrinter is used to print both the assembly string and the
/// binary code. When passed an MCAsmStreamer it prints assembly and when
/// passed an MCObjectStreamer it outputs binary code.
//
//===----------------------------------------------------------------------===//
//

#include "AMDGPUAsmPrinter.h"
#include "AMDGPU.h"
#include "AMDGPUHSAMetadataStreamer.h"
#include "AMDKernelCodeT.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUInstPrinter.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "R600AsmPrinter.h"
#include "SIMachineFunctionInfo.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/AMDHSAKernelDescriptor.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;
using namespace llvm::AMDGPU;

// We need to tell the runtime some amount ahead of time if we don't know the
// true stack size. Assume a smaller number if this is only due to dynamic /
// non-entry block allocas.
static cl::opt<uint32_t> AssumedStackSizeForExternalCall(
    "amdgpu-assume-external-call-stack-size",
    cl::desc("Assumed stack use of any external call (in bytes)"),
    cl::Hidden,
    cl::init(16384));

static cl::opt<uint32_t> AssumedStackSizeForDynamicSizeObjects(
    "amdgpu-assume-dynamic-stack-object-size",
    cl::desc("Assumed extra stack use if there are any "
             "variable sized objects (in bytes)"),
    cl::Hidden,
    cl::init(4096));

// This should get the default rounding mode from the kernel. We just set the
// default here, but this could change if the OpenCL rounding mode pragmas are
// used.
//
// The denormal mode here should match what is reported by the OpenCL runtime
// for the CL_FP_DENORM bit from CL_DEVICE_{HALF|SINGLE|DOUBLE}_FP_CONFIG, but
// can also be overridden to flush with the -cl-denorms-are-zero compiler flag.
//
// AMD OpenCL only sets flush none and reports CL_FP_DENORM for double
// precision, and leaves single precision to flush all and does not report
// CL_FP_DENORM for CL_DEVICE_SINGLE_FP_CONFIG. Mesa's OpenCL currently reports
// CL_FP_DENORM for both.
//
// FIXME: It seems some instructions do not support single precision denormals
// regardless of the mode (exp_*_f32, rcp_*_f32, rsq_*_f32, sqrt_f32, and
// sin_f32, cos_f32 on most parts).

// We want to use these instructions, and using fp32 denormals also causes
// instructions to run at the double precision rate for the device so it's
// probably best to just report no single precision denormals.
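//
// For reference: getFPMode() packs the two rounding fields and the two
// denormal fields of the MODE register into a single word. Assuming the usual
// SIDefines.h encodings (FP_ROUND_ROUND_TO_NEAREST == 0 and
// FP_DENORM_FLUSH_NONE == 3), a kernel that keeps denormals in both
// precisions packs to
//   FP_DENORM_MODE_SP(3) | FP_DENORM_MODE_DP(3) == (3 << 4) | (3 << 6) == 0xf0
// with both rounding fields left at zero.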
static uint32_t getFPMode(AMDGPU::SIModeRegisterDefaults Mode) {
  return FP_ROUND_MODE_SP(FP_ROUND_ROUND_TO_NEAREST) |
         FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_NEAREST) |
         FP_DENORM_MODE_SP(Mode.fpDenormModeSPValue()) |
         FP_DENORM_MODE_DP(Mode.fpDenormModeDPValue());
}

static AsmPrinter *
createAMDGPUAsmPrinterPass(TargetMachine &tm,
                           std::unique_ptr<MCStreamer> &&Streamer) {
  return new AMDGPUAsmPrinter(tm, std::move(Streamer));
}

extern "C" void LLVM_EXTERNAL_VISIBILITY LLVMInitializeAMDGPUAsmPrinter() {
  TargetRegistry::RegisterAsmPrinter(getTheAMDGPUTarget(),
                                     llvm::createR600AsmPrinterPass);
  TargetRegistry::RegisterAsmPrinter(getTheGCNTarget(),
                                     createAMDGPUAsmPrinterPass);
}

AMDGPUAsmPrinter::AMDGPUAsmPrinter(TargetMachine &TM,
                                   std::unique_ptr<MCStreamer> Streamer)
    : AsmPrinter(TM, std::move(Streamer)) {
  if (TM.getTargetTriple().getOS() == Triple::AMDHSA) {
    if (isHsaAbiVersion2(getGlobalSTI())) {
      HSAMetadataStream.reset(new HSAMD::MetadataStreamerV2());
    } else if (isHsaAbiVersion3(getGlobalSTI())) {
      HSAMetadataStream.reset(new HSAMD::MetadataStreamerV3());
    } else {
      HSAMetadataStream.reset(new HSAMD::MetadataStreamerV4());
    }
  }
}

StringRef AMDGPUAsmPrinter::getPassName() const {
  return "AMDGPU Assembly Printer";
}

const MCSubtargetInfo *AMDGPUAsmPrinter::getGlobalSTI() const {
  return TM.getMCSubtargetInfo();
}

AMDGPUTargetStreamer* AMDGPUAsmPrinter::getTargetStreamer() const {
  if (!OutStreamer)
    return nullptr;
  return static_cast<AMDGPUTargetStreamer*>(OutStreamer->getTargetStreamer());
}

void AMDGPUAsmPrinter::emitStartOfAsmFile(Module &M) {
  // TODO: Which one is called first, emitStartOfAsmFile or
  // emitFunctionBodyStart?
  if (getTargetStreamer() && !getTargetStreamer()->getTargetID())
    initializeTargetID(M);

  if (TM.getTargetTriple().getOS() != Triple::AMDHSA &&
      TM.getTargetTriple().getOS() != Triple::AMDPAL)
    return;

  if (isHsaAbiVersion3Or4(getGlobalSTI()))
    getTargetStreamer()->EmitDirectiveAMDGCNTarget();

  if (TM.getTargetTriple().getOS() == Triple::AMDHSA)
    HSAMetadataStream->begin(M, *getTargetStreamer()->getTargetID());

  if (TM.getTargetTriple().getOS() == Triple::AMDPAL)
    getTargetStreamer()->getPALMetadata()->readFromIR(M);

  if (isHsaAbiVersion3Or4(getGlobalSTI()))
    return;

  // HSA emits NT_AMD_HSA_CODE_OBJECT_VERSION for code objects v2.
  if (TM.getTargetTriple().getOS() == Triple::AMDHSA)
    getTargetStreamer()->EmitDirectiveHSACodeObjectVersion(2, 1);

  // HSA and PAL emit NT_AMD_HSA_ISA_VERSION for code objects v2.
  IsaVersion Version = getIsaVersion(getGlobalSTI()->getCPU());
  getTargetStreamer()->EmitDirectiveHSACodeObjectISAV2(
      Version.Major, Version.Minor, Version.Stepping, "AMD", "AMDGPU");
}

void AMDGPUAsmPrinter::emitEndOfAsmFile(Module &M) {
  // Following code requires TargetStreamer to be present.
  if (!getTargetStreamer())
    return;

  if (TM.getTargetTriple().getOS() != Triple::AMDHSA ||
      isHsaAbiVersion2(getGlobalSTI()))
    getTargetStreamer()->EmitISAVersion();

  // Emit HSA Metadata (NT_AMD_HSA_METADATA).
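  // The metadata streamer selected in the constructor determines the note
  // payload: the code object v2 streamer emits YAML, while the v3 and v4
  // streamers emit MessagePack.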
  if (TM.getTargetTriple().getOS() == Triple::AMDHSA) {
    HSAMetadataStream->end();
    bool Success = HSAMetadataStream->emitTo(*getTargetStreamer());
    (void)Success;
    assert(Success && "Malformed HSA Metadata");
  }
}

bool AMDGPUAsmPrinter::isBlockOnlyReachableByFallthrough(
    const MachineBasicBlock *MBB) const {
  if (!AsmPrinter::isBlockOnlyReachableByFallthrough(MBB))
    return false;

  if (MBB->empty())
    return true;

  // If this is a block implementing a long branch, an expression relative to
  // the start of the block is needed.
  // XXX - Is there a smarter way to check this?
  return (MBB->back().getOpcode() != AMDGPU::S_SETPC_B64);
}

void AMDGPUAsmPrinter::emitFunctionBodyStart() {
  const SIMachineFunctionInfo &MFI = *MF->getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &STM = MF->getSubtarget<GCNSubtarget>();
  const Function &F = MF->getFunction();

  // TODO: Which one is called first, emitStartOfAsmFile or
  // emitFunctionBodyStart?
  if (getTargetStreamer() && !getTargetStreamer()->getTargetID())
    initializeTargetID(*F.getParent());

  const auto &FunctionTargetID = STM.getTargetID();
  // Make sure function's xnack settings are compatible with module's
  // xnack settings.
  if (FunctionTargetID.isXnackSupported() &&
      FunctionTargetID.getXnackSetting() != IsaInfo::TargetIDSetting::Any &&
      FunctionTargetID.getXnackSetting() !=
          getTargetStreamer()->getTargetID()->getXnackSetting()) {
    OutContext.reportError({}, "xnack setting of '" + Twine(MF->getName()) +
                               "' function does not match module xnack setting");
    return;
  }
  // Make sure function's sramecc settings are compatible with module's
  // sramecc settings.
  if (FunctionTargetID.isSramEccSupported() &&
      FunctionTargetID.getSramEccSetting() != IsaInfo::TargetIDSetting::Any &&
      FunctionTargetID.getSramEccSetting() !=
          getTargetStreamer()->getTargetID()->getSramEccSetting()) {
    OutContext.reportError({}, "sramecc setting of '" + Twine(MF->getName()) +
                               "' function does not match module sramecc setting");
    return;
  }

  if (!MFI.isEntryFunction())
    return;

  if ((STM.isMesaKernel(F) || isHsaAbiVersion2(getGlobalSTI())) &&
      (F.getCallingConv() == CallingConv::AMDGPU_KERNEL ||
       F.getCallingConv() == CallingConv::SPIR_KERNEL)) {
    amd_kernel_code_t KernelCode;
    getAmdKernelCode(KernelCode, CurrentProgramInfo, *MF);
    getTargetStreamer()->EmitAMDKernelCodeT(KernelCode);
  }

  if (STM.isAmdHsaOS())
    HSAMetadataStream->emitKernel(*MF, CurrentProgramInfo);
}

void AMDGPUAsmPrinter::emitFunctionBodyEnd() {
  const SIMachineFunctionInfo &MFI = *MF->getInfo<SIMachineFunctionInfo>();
  if (!MFI.isEntryFunction())
    return;

  if (TM.getTargetTriple().getOS() != Triple::AMDHSA ||
      isHsaAbiVersion2(getGlobalSTI()))
    return;

  auto &Streamer = getTargetStreamer()->getStreamer();
  auto &Context = Streamer.getContext();
  auto &ObjectFileInfo = *Context.getObjectFileInfo();
  auto &ReadOnlySection = *ObjectFileInfo.getReadOnlySection();

  Streamer.PushSection();
  Streamer.SwitchSection(&ReadOnlySection);

  // CP microcode requires the kernel descriptor to be allocated on 64 byte
  // alignment.
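  // Pad the current offset and, if necessary, raise the section's own
  // alignment so the descriptor emitted below starts on a 64 byte boundary.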
  Streamer.emitValueToAlignment(64, 0, 1, 0);
  if (ReadOnlySection.getAlignment() < 64)
    ReadOnlySection.setAlignment(Align(64));

  const GCNSubtarget &STM = MF->getSubtarget<GCNSubtarget>();

  SmallString<128> KernelName;
  getNameWithPrefix(KernelName, &MF->getFunction());
  getTargetStreamer()->EmitAmdhsaKernelDescriptor(
      STM, KernelName, getAmdhsaKernelDescriptor(*MF, CurrentProgramInfo),
      CurrentProgramInfo.NumVGPRsForWavesPerEU,
      CurrentProgramInfo.NumSGPRsForWavesPerEU -
          IsaInfo::getNumExtraSGPRs(&STM,
                                    CurrentProgramInfo.VCCUsed,
                                    CurrentProgramInfo.FlatUsed),
      CurrentProgramInfo.VCCUsed, CurrentProgramInfo.FlatUsed);

  Streamer.PopSection();
}

void AMDGPUAsmPrinter::emitFunctionEntryLabel() {
  if (TM.getTargetTriple().getOS() == Triple::AMDHSA &&
      isHsaAbiVersion3Or4(getGlobalSTI())) {
    AsmPrinter::emitFunctionEntryLabel();
    return;
  }

  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &STM = MF->getSubtarget<GCNSubtarget>();
  if (MFI->isEntryFunction() && STM.isAmdHsaOrMesa(MF->getFunction())) {
    SmallString<128> SymbolName;
    getNameWithPrefix(SymbolName, &MF->getFunction());
    getTargetStreamer()->EmitAMDGPUSymbolType(
        SymbolName, ELF::STT_AMDGPU_HSA_KERNEL);
  }
  if (DumpCodeInstEmitter) {
    // Disassemble function name label to text.
    DisasmLines.push_back(MF->getName().str() + ":");
    DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLines.back().size());
    HexLines.push_back("");
  }

  AsmPrinter::emitFunctionEntryLabel();
}

void AMDGPUAsmPrinter::emitBasicBlockStart(const MachineBasicBlock &MBB) {
  if (DumpCodeInstEmitter && !isBlockOnlyReachableByFallthrough(&MBB)) {
    // Write a line for the basic block label if it is not only fallthrough.
    DisasmLines.push_back(
        (Twine("BB") + Twine(getFunctionNumber())
         + "_" + Twine(MBB.getNumber()) + ":").str());
    DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLines.back().size());
    HexLines.push_back("");
  }
  AsmPrinter::emitBasicBlockStart(MBB);
}

void AMDGPUAsmPrinter::emitGlobalVariable(const GlobalVariable *GV) {
  if (GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
    if (GV->hasInitializer() && !isa<UndefValue>(GV->getInitializer())) {
      OutContext.reportError({},
                             Twine(GV->getName()) +
                                 ": unsupported initializer for address space");
      return;
    }

    // LDS variables aren't emitted in HSA or PAL yet.
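    // Elsewhere (i.e. Mesa) the variable is only described to the assembler
    // via emitAMDGPULDS below, which records its size and alignment; no bytes
    // are emitted since LDS has no initializer in the code object.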
    const Triple::OSType OS = TM.getTargetTriple().getOS();
    if (OS == Triple::AMDHSA || OS == Triple::AMDPAL)
      return;

    MCSymbol *GVSym = getSymbol(GV);

    GVSym->redefineIfPossible();
    if (GVSym->isDefined() || GVSym->isVariable())
      report_fatal_error("symbol '" + Twine(GVSym->getName()) +
                         "' is already defined");

    const DataLayout &DL = GV->getParent()->getDataLayout();
    uint64_t Size = DL.getTypeAllocSize(GV->getValueType());
    Align Alignment = GV->getAlign().getValueOr(Align(4));

    emitVisibility(GVSym, GV->getVisibility(), !GV->isDeclaration());
    emitLinkage(GV, GVSym);
    if (auto TS = getTargetStreamer())
      TS->emitAMDGPULDS(GVSym, Size, Alignment);
    return;
  }

  AsmPrinter::emitGlobalVariable(GV);
}

bool AMDGPUAsmPrinter::doFinalization(Module &M) {
  CallGraphResourceInfo.clear();

  // Pad with s_code_end to help tools and guard against instruction prefetch
  // causing stale data in caches. Arguably this should be done by the linker,
  // which is why this isn't done for Mesa.
  const MCSubtargetInfo &STI = *getGlobalSTI();
  if ((AMDGPU::isGFX10Plus(STI) || AMDGPU::isGFX90A(STI)) &&
      (STI.getTargetTriple().getOS() == Triple::AMDHSA ||
       STI.getTargetTriple().getOS() == Triple::AMDPAL)) {
    OutStreamer->SwitchSection(getObjFileLowering().getTextSection());
    getTargetStreamer()->EmitCodeEnd(STI);
  }

  return AsmPrinter::doFinalization(M);
}

// Print comments that apply to both callable functions and entry points.
void AMDGPUAsmPrinter::emitCommonFunctionComments(
    uint32_t NumVGPR,
    Optional<uint32_t> NumAGPR,
    uint32_t TotalNumVGPR,
    uint32_t NumSGPR,
    uint64_t ScratchSize,
    uint64_t CodeSize,
    const AMDGPUMachineFunction *MFI) {
  OutStreamer->emitRawComment(" codeLenInByte = " + Twine(CodeSize), false);
  OutStreamer->emitRawComment(" NumSgprs: " + Twine(NumSGPR), false);
  OutStreamer->emitRawComment(" NumVgprs: " + Twine(NumVGPR), false);
  if (NumAGPR) {
    OutStreamer->emitRawComment(" NumAgprs: " + Twine(*NumAGPR), false);
    OutStreamer->emitRawComment(" TotalNumVgprs: " + Twine(TotalNumVGPR),
                                false);
  }
  OutStreamer->emitRawComment(" ScratchSize: " + Twine(ScratchSize), false);
  OutStreamer->emitRawComment(" MemoryBound: " + Twine(MFI->isMemoryBound()),
                              false);
}

uint16_t AMDGPUAsmPrinter::getAmdhsaKernelCodeProperties(
    const MachineFunction &MF) const {
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
  uint16_t KernelCodeProperties = 0;

  if (MFI.hasPrivateSegmentBuffer()) {
    KernelCodeProperties |=
        amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER;
  }
  if (MFI.hasDispatchPtr()) {
    KernelCodeProperties |=
        amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR;
  }
  if (MFI.hasQueuePtr()) {
    KernelCodeProperties |=
        amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR;
  }
  if (MFI.hasKernargSegmentPtr()) {
    KernelCodeProperties |=
        amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR;
  }
  if (MFI.hasDispatchID()) {
    KernelCodeProperties |=
        amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID;
  }
  if (MFI.hasFlatScratchInit()) {
    KernelCodeProperties |=
        amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT;
  }
  if (MF.getSubtarget<GCNSubtarget>().isWave32()) {
    KernelCodeProperties |=
        amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
  }

  return KernelCodeProperties;
}

amdhsa::kernel_descriptor_t AMDGPUAsmPrinter::getAmdhsaKernelDescriptor(
    const MachineFunction &MF,
    const SIProgramInfo &PI) const {
  const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
  const Function &F = MF.getFunction();

  amdhsa::kernel_descriptor_t KernelDescriptor;
  memset(&KernelDescriptor, 0x0, sizeof(KernelDescriptor));

  assert(isUInt<32>(PI.ScratchSize));
  assert(isUInt<32>(PI.getComputePGMRSrc1()));
  assert(isUInt<32>(PI.ComputePGMRSrc2));

  KernelDescriptor.group_segment_fixed_size = PI.LDSSize;
  KernelDescriptor.private_segment_fixed_size = PI.ScratchSize;

  Align MaxKernArgAlign;
  KernelDescriptor.kernarg_size = STM.getKernArgSegmentSize(F, MaxKernArgAlign);

  KernelDescriptor.compute_pgm_rsrc1 = PI.getComputePGMRSrc1();
  KernelDescriptor.compute_pgm_rsrc2 = PI.ComputePGMRSrc2;
  KernelDescriptor.kernel_code_properties = getAmdhsaKernelCodeProperties(MF);

  assert(STM.hasGFX90AInsts() || CurrentProgramInfo.ComputePGMRSrc3GFX90A == 0);
  if (STM.hasGFX90AInsts())
    KernelDescriptor.compute_pgm_rsrc3 =
        CurrentProgramInfo.ComputePGMRSrc3GFX90A;

  return KernelDescriptor;
}

bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
  CurrentProgramInfo = SIProgramInfo();

  const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();

  // The starting address of all shader programs must be 256 bytes aligned.
  // Regular functions just need the basic required instruction alignment.
  MF.setAlignment(MFI->isEntryFunction() ? Align(256) : Align(4));

  SetupMachineFunction(MF);

  const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
  MCContext &Context = getObjFileLowering().getContext();
  // FIXME: This should be an explicit check for Mesa.
  if (!STM.isAmdHsaOS() && !STM.isAmdPalOS()) {
    MCSectionELF *ConfigSection =
        Context.getELFSection(".AMDGPU.config", ELF::SHT_PROGBITS, 0);
    OutStreamer->SwitchSection(ConfigSection);
  }

  if (MFI->isModuleEntryFunction()) {
    getSIProgramInfo(CurrentProgramInfo, MF);
  } else {
    auto I = CallGraphResourceInfo.insert(
        std::make_pair(&MF.getFunction(), SIFunctionResourceInfo()));
    SIFunctionResourceInfo &Info = I.first->second;
    assert(I.second && "should only be called once per function");
    Info = analyzeResourceUsage(MF);
  }

  if (STM.isAmdPalOS()) {
    if (MFI->isEntryFunction())
      EmitPALMetadata(MF, CurrentProgramInfo);
    else if (MFI->isModuleEntryFunction())
      emitPALFunctionMetadata(MF);
  } else if (!STM.isAmdHsaOS()) {
    EmitProgramInfoSI(MF, CurrentProgramInfo);
  }

  DumpCodeInstEmitter = nullptr;
  if (STM.dumpCode()) {
    // For -dumpcode, get the assembler out of the streamer, even if it does
    // not really want to let us have it. This only works with -filetype=obj.
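    // Temporarily claim the assembler info is needed for parsing so the
    // streamer hands back its MCAssembler, then restore the original flag.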
    bool SaveFlag = OutStreamer->getUseAssemblerInfoForParsing();
    OutStreamer->setUseAssemblerInfoForParsing(true);
    MCAssembler *Assembler = OutStreamer->getAssemblerPtr();
    OutStreamer->setUseAssemblerInfoForParsing(SaveFlag);
    if (Assembler)
      DumpCodeInstEmitter = Assembler->getEmitterPtr();
  }

  DisasmLines.clear();
  HexLines.clear();
  DisasmLineMaxLen = 0;

  emitFunctionBody();

  if (isVerbose()) {
    MCSectionELF *CommentSection =
        Context.getELFSection(".AMDGPU.csdata", ELF::SHT_PROGBITS, 0);
    OutStreamer->SwitchSection(CommentSection);

    if (!MFI->isEntryFunction()) {
      OutStreamer->emitRawComment(" Function info:", false);
      SIFunctionResourceInfo &Info = CallGraphResourceInfo[&MF.getFunction()];
      emitCommonFunctionComments(
          Info.NumVGPR,
          STM.hasMAIInsts() ? Info.NumAGPR : Optional<uint32_t>(),
          Info.getTotalNumVGPRs(STM),
          Info.getTotalNumSGPRs(MF.getSubtarget<GCNSubtarget>()),
          Info.PrivateSegmentSize,
          getFunctionCodeSize(MF), MFI);
      return false;
    }

    OutStreamer->emitRawComment(" Kernel info:", false);
    emitCommonFunctionComments(CurrentProgramInfo.NumArchVGPR,
                               STM.hasMAIInsts()
                                   ? CurrentProgramInfo.NumAccVGPR
                                   : Optional<uint32_t>(),
                               CurrentProgramInfo.NumVGPR,
                               CurrentProgramInfo.NumSGPR,
                               CurrentProgramInfo.ScratchSize,
                               getFunctionCodeSize(MF), MFI);

    OutStreamer->emitRawComment(
        " FloatMode: " + Twine(CurrentProgramInfo.FloatMode), false);
    OutStreamer->emitRawComment(
        " IeeeMode: " + Twine(CurrentProgramInfo.IEEEMode), false);
    OutStreamer->emitRawComment(
        " LDSByteSize: " + Twine(CurrentProgramInfo.LDSSize) +
        " bytes/workgroup (compile time only)", false);

    OutStreamer->emitRawComment(
        " SGPRBlocks: " + Twine(CurrentProgramInfo.SGPRBlocks), false);
    OutStreamer->emitRawComment(
        " VGPRBlocks: " + Twine(CurrentProgramInfo.VGPRBlocks), false);

    OutStreamer->emitRawComment(
        " NumSGPRsForWavesPerEU: " +
        Twine(CurrentProgramInfo.NumSGPRsForWavesPerEU), false);
    OutStreamer->emitRawComment(
        " NumVGPRsForWavesPerEU: " +
        Twine(CurrentProgramInfo.NumVGPRsForWavesPerEU), false);

    if (STM.hasGFX90AInsts())
      OutStreamer->emitRawComment(
          " AccumOffset: " +
          Twine((CurrentProgramInfo.AccumOffset + 1) * 4), false);

    OutStreamer->emitRawComment(
        " Occupancy: " +
        Twine(CurrentProgramInfo.Occupancy), false);

    OutStreamer->emitRawComment(
        " WaveLimiterHint : " + Twine(MFI->needsWaveLimiter()), false);

    OutStreamer->emitRawComment(
        " COMPUTE_PGM_RSRC2:SCRATCH_EN: " +
        Twine(G_00B84C_SCRATCH_EN(CurrentProgramInfo.ComputePGMRSrc2)), false);
    OutStreamer->emitRawComment(
        " COMPUTE_PGM_RSRC2:USER_SGPR: " +
        Twine(G_00B84C_USER_SGPR(CurrentProgramInfo.ComputePGMRSrc2)), false);
    OutStreamer->emitRawComment(
        " COMPUTE_PGM_RSRC2:TRAP_HANDLER: " +
        Twine(G_00B84C_TRAP_HANDLER(CurrentProgramInfo.ComputePGMRSrc2)), false);
    OutStreamer->emitRawComment(
        " COMPUTE_PGM_RSRC2:TGID_X_EN: " +
        Twine(G_00B84C_TGID_X_EN(CurrentProgramInfo.ComputePGMRSrc2)), false);
    OutStreamer->emitRawComment(
        " COMPUTE_PGM_RSRC2:TGID_Y_EN: " +
        Twine(G_00B84C_TGID_Y_EN(CurrentProgramInfo.ComputePGMRSrc2)), false);
    OutStreamer->emitRawComment(
        " COMPUTE_PGM_RSRC2:TGID_Z_EN: " +
        Twine(G_00B84C_TGID_Z_EN(CurrentProgramInfo.ComputePGMRSrc2)), false);
    OutStreamer->emitRawComment(
        " COMPUTE_PGM_RSRC2:TIDIG_COMP_CNT: " +
        Twine(G_00B84C_TIDIG_COMP_CNT(CurrentProgramInfo.ComputePGMRSrc2)),
        false);

    assert(STM.hasGFX90AInsts() ||
           CurrentProgramInfo.ComputePGMRSrc3GFX90A == 0);
    if (STM.hasGFX90AInsts()) {
      OutStreamer->emitRawComment(
          " COMPUTE_PGM_RSRC3_GFX90A:ACCUM_OFFSET: " +
          Twine((AMDHSA_BITS_GET(CurrentProgramInfo.ComputePGMRSrc3GFX90A,
                                 amdhsa::COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET))),
          false);
      OutStreamer->emitRawComment(
          " COMPUTE_PGM_RSRC3_GFX90A:TG_SPLIT: " +
          Twine((AMDHSA_BITS_GET(CurrentProgramInfo.ComputePGMRSrc3GFX90A,
                                 amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT))),
          false);
    }
  }

  if (DumpCodeInstEmitter) {

    OutStreamer->SwitchSection(
        Context.getELFSection(".AMDGPU.disasm", ELF::SHT_PROGBITS, 0));

    for (size_t i = 0; i < DisasmLines.size(); ++i) {
      std::string Comment = "\n";
      if (!HexLines[i].empty()) {
        Comment = std::string(DisasmLineMaxLen - DisasmLines[i].size(), ' ');
        Comment += " ; " + HexLines[i] + "\n";
      }

      OutStreamer->emitBytes(StringRef(DisasmLines[i]));
      OutStreamer->emitBytes(StringRef(Comment));
    }
  }

  return false;
}

bool AMDGPUAsmPrinter::doInitialization(Module &M) {
  NonKernelMaxSGPRs = 0;
  NonKernelMaxVGPRs = 0;
  // Compute upper bound on the number of SGPRs and VGPRs
  // for non-kernel functions.
  for (const Function &F : M) {
    if (!AMDGPU::isEntryFunctionCC(F.getCallingConv())) {
      const GCNSubtarget &STM = TM.getSubtarget<GCNSubtarget>(F);
      NonKernelMaxSGPRs = std::max(NonKernelMaxSGPRs, STM.getMaxNumSGPRs(F));
      NonKernelMaxVGPRs = std::max(NonKernelMaxVGPRs, STM.getMaxNumVGPRs(F));
    }
  }
  return AsmPrinter::doInitialization(M);
}

// TODO: Fold this into emitFunctionBodyStart.
void AMDGPUAsmPrinter::initializeTargetID(const Module &M) {
  // In the beginning all features are either 'Any' or 'NotSupported',
  // depending on global target features. This will cover empty modules.
  getTargetStreamer()->initializeTargetID(
      *getGlobalSTI(), getGlobalSTI()->getFeatureString());

  // If module is empty, we are done.
  if (M.empty())
    return;

  // If module is not empty, need to find first 'Off' or 'On' feature
  // setting per feature from functions in module.
  for (auto &F : M) {
    auto &TSTargetID = getTargetStreamer()->getTargetID();
    if ((!TSTargetID->isXnackSupported() || TSTargetID->isXnackOnOrOff()) &&
        (!TSTargetID->isSramEccSupported() || TSTargetID->isSramEccOnOrOff()))
      break;

    const GCNSubtarget &STM = TM.getSubtarget<GCNSubtarget>(F);
    const IsaInfo::AMDGPUTargetID &STMTargetID = STM.getTargetID();
    if (TSTargetID->isXnackSupported())
      if (TSTargetID->getXnackSetting() == IsaInfo::TargetIDSetting::Any)
        TSTargetID->setXnackSetting(STMTargetID.getXnackSetting());
    if (TSTargetID->isSramEccSupported())
      if (TSTargetID->getSramEccSetting() == IsaInfo::TargetIDSetting::Any)
        TSTargetID->setSramEccSetting(STMTargetID.getSramEccSetting());
  }
}

uint64_t AMDGPUAsmPrinter::getFunctionCodeSize(const MachineFunction &MF) const {
  const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = STM.getInstrInfo();

  uint64_t CodeSize = 0;

  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      // TODO: CodeSize should account for multiple functions.

      // TODO: Should we count size of debug info?
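      // Debug pseudo instructions (e.g. DBG_VALUE) lower to no machine code,
      // so they contribute nothing to the code size.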
      if (MI.isDebugInstr())
        continue;

      CodeSize += TII->getInstSizeInBytes(MI);
    }
  }

  return CodeSize;
}

static bool hasAnyNonFlatUseOfReg(const MachineRegisterInfo &MRI,
                                  const SIInstrInfo &TII,
                                  unsigned Reg) {
  for (const MachineOperand &UseOp : MRI.reg_operands(Reg)) {
    if (!UseOp.isImplicit() || !TII.isFLAT(*UseOp.getParent()))
      return true;
  }

  return false;
}

int32_t AMDGPUAsmPrinter::SIFunctionResourceInfo::getTotalNumSGPRs(
    const GCNSubtarget &ST) const {
  return NumExplicitSGPR + IsaInfo::getNumExtraSGPRs(
      &ST, UsesVCC, UsesFlatScratch, ST.getTargetID().isXnackOnOrAny());
}

int32_t AMDGPUAsmPrinter::SIFunctionResourceInfo::getTotalNumVGPRs(
    const GCNSubtarget &ST) const {
  if (ST.hasGFX90AInsts() && NumAGPR)
    return alignTo(NumVGPR, 4) + NumAGPR;
  return std::max(NumVGPR, NumAGPR);
}

static const Function *getCalleeFunction(const MachineOperand &Op) {
  if (Op.isImm()) {
    assert(Op.getImm() == 0);
    return nullptr;
  }

  return cast<Function>(Op.getGlobal());
}

AMDGPUAsmPrinter::SIFunctionResourceInfo AMDGPUAsmPrinter::analyzeResourceUsage(
    const MachineFunction &MF) const {
  SIFunctionResourceInfo Info;

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  Info.UsesFlatScratch = MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_LO) ||
                         MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_HI) ||
                         MRI.isLiveIn(MFI->getPreloadedReg(
                             AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT));

  // Even if FLAT_SCRATCH is implicitly used, it has no effect if flat
  // instructions aren't used to access the scratch buffer. Inline assembly may
  // need it though.
  //
  // If we only have implicit uses of flat_scr on flat instructions, it is not
  // really needed.
  if (Info.UsesFlatScratch && !MFI->hasFlatScratchInit() &&
      (!hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR) &&
       !hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_LO) &&
       !hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_HI))) {
    Info.UsesFlatScratch = false;
  }

  Info.PrivateSegmentSize = FrameInfo.getStackSize();

  // Assume a big number if there are any unknown sized objects.
  Info.HasDynamicallySizedStack = FrameInfo.hasVarSizedObjects();
  if (Info.HasDynamicallySizedStack)
    Info.PrivateSegmentSize += AssumedStackSizeForDynamicSizeObjects;

  if (MFI->isStackRealigned())
    Info.PrivateSegmentSize += FrameInfo.getMaxAlign().value();

  Info.UsesVCC = MRI.isPhysRegUsed(AMDGPU::VCC_LO) ||
                 MRI.isPhysRegUsed(AMDGPU::VCC_HI);

  // If there are no calls, MachineRegisterInfo can tell us the used register
  // count easily.
  // A tail call isn't considered a call for MachineFrameInfo's purposes.
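  // Scan each register file from the top down; the first used register found
  // is the highest one, which directly yields that file's register count.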
  if (!FrameInfo.hasCalls() && !FrameInfo.hasTailCall()) {
    MCPhysReg HighestVGPRReg = AMDGPU::NoRegister;
    for (MCPhysReg Reg : reverse(AMDGPU::VGPR_32RegClass.getRegisters())) {
      if (MRI.isPhysRegUsed(Reg)) {
        HighestVGPRReg = Reg;
        break;
      }
    }

    if (ST.hasMAIInsts()) {
      MCPhysReg HighestAGPRReg = AMDGPU::NoRegister;
      for (MCPhysReg Reg : reverse(AMDGPU::AGPR_32RegClass.getRegisters())) {
        if (MRI.isPhysRegUsed(Reg)) {
          HighestAGPRReg = Reg;
          break;
        }
      }
      Info.NumAGPR = HighestAGPRReg == AMDGPU::NoRegister ? 0 :
          TRI.getHWRegIndex(HighestAGPRReg) + 1;
    }

    MCPhysReg HighestSGPRReg = AMDGPU::NoRegister;
    for (MCPhysReg Reg : reverse(AMDGPU::SGPR_32RegClass.getRegisters())) {
      if (MRI.isPhysRegUsed(Reg)) {
        HighestSGPRReg = Reg;
        break;
      }
    }

    // We found the maximum register index. They start at 0, so add one to get
    // the number of registers.
    Info.NumVGPR = HighestVGPRReg == AMDGPU::NoRegister ? 0 :
        TRI.getHWRegIndex(HighestVGPRReg) + 1;
    Info.NumExplicitSGPR = HighestSGPRReg == AMDGPU::NoRegister ? 0 :
        TRI.getHWRegIndex(HighestSGPRReg) + 1;

    return Info;
  }

  int32_t MaxVGPR = -1;
  int32_t MaxAGPR = -1;
  int32_t MaxSGPR = -1;
  uint64_t CalleeFrameSize = 0;

  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      // TODO: Check regmasks? Do they occur anywhere except calls?
      for (const MachineOperand &MO : MI.operands()) {
        unsigned Width = 0;
        bool IsSGPR = false;
        bool IsAGPR = false;

        if (!MO.isReg())
          continue;

        Register Reg = MO.getReg();
        switch (Reg) {
        case AMDGPU::EXEC:
        case AMDGPU::EXEC_LO:
        case AMDGPU::EXEC_HI:
        case AMDGPU::SCC:
        case AMDGPU::M0:
        case AMDGPU::M0_LO16:
        case AMDGPU::M0_HI16:
        case AMDGPU::SRC_SHARED_BASE:
        case AMDGPU::SRC_SHARED_LIMIT:
        case AMDGPU::SRC_PRIVATE_BASE:
        case AMDGPU::SRC_PRIVATE_LIMIT:
        case AMDGPU::SGPR_NULL:
        case AMDGPU::MODE:
          continue;

        case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
          llvm_unreachable("src_pops_exiting_wave_id should not be used");

        case AMDGPU::NoRegister:
          assert(MI.isDebugInstr() &&
                 "Instruction uses invalid noreg register");
          continue;

        case AMDGPU::VCC:
        case AMDGPU::VCC_LO:
        case AMDGPU::VCC_HI:
        case AMDGPU::VCC_LO_LO16:
        case AMDGPU::VCC_LO_HI16:
        case AMDGPU::VCC_HI_LO16:
        case AMDGPU::VCC_HI_HI16:
          Info.UsesVCC = true;
          continue;

        case AMDGPU::FLAT_SCR:
        case AMDGPU::FLAT_SCR_LO:
        case AMDGPU::FLAT_SCR_HI:
          continue;

        case AMDGPU::XNACK_MASK:
        case AMDGPU::XNACK_MASK_LO:
        case AMDGPU::XNACK_MASK_HI:
          llvm_unreachable("xnack_mask registers should not be used");

        case AMDGPU::LDS_DIRECT:
          llvm_unreachable("lds_direct register should not be used");

        case AMDGPU::TBA:
        case AMDGPU::TBA_LO:
        case AMDGPU::TBA_HI:
        case AMDGPU::TMA:
        case AMDGPU::TMA_LO:
        case AMDGPU::TMA_HI:
          llvm_unreachable("trap handler registers should not be used");

        case AMDGPU::SRC_VCCZ:
          llvm_unreachable("src_vccz register should not be used");

        case AMDGPU::SRC_EXECZ:
          llvm_unreachable("src_execz register should not be used");

        case AMDGPU::SRC_SCC:
          llvm_unreachable("src_scc register should not be used");

        default:
          break;
        }

        if (AMDGPU::SReg_32RegClass.contains(Reg) ||
            AMDGPU::SReg_LO16RegClass.contains(Reg) ||
            AMDGPU::SGPR_HI16RegClass.contains(Reg)) {
          assert(!AMDGPU::TTMP_32RegClass.contains(Reg) &&
                 "trap handler registers should not be used");
          IsSGPR = true;
          Width = 1;
        } else if (AMDGPU::VGPR_32RegClass.contains(Reg) ||
                   AMDGPU::VGPR_LO16RegClass.contains(Reg) ||
                   AMDGPU::VGPR_HI16RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 1;
        } else if (AMDGPU::AGPR_32RegClass.contains(Reg) ||
                   AMDGPU::AGPR_LO16RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 1;
        } else if (AMDGPU::SReg_64RegClass.contains(Reg)) {
          assert(!AMDGPU::TTMP_64RegClass.contains(Reg) &&
                 "trap handler registers should not be used");
          IsSGPR = true;
          Width = 2;
        } else if (AMDGPU::VReg_64RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 2;
        } else if (AMDGPU::AReg_64RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 2;
        } else if (AMDGPU::VReg_96RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 3;
        } else if (AMDGPU::SReg_96RegClass.contains(Reg)) {
          IsSGPR = true;
          Width = 3;
        } else if (AMDGPU::AReg_96RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 3;
        } else if (AMDGPU::SReg_128RegClass.contains(Reg)) {
          assert(!AMDGPU::TTMP_128RegClass.contains(Reg) &&
                 "trap handler registers should not be used");
          IsSGPR = true;
          Width = 4;
        } else if (AMDGPU::VReg_128RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 4;
        } else if (AMDGPU::AReg_128RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 4;
        } else if (AMDGPU::VReg_160RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 5;
        } else if (AMDGPU::SReg_160RegClass.contains(Reg)) {
          IsSGPR = true;
          Width = 5;
        } else if (AMDGPU::AReg_160RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 5;
        } else if (AMDGPU::VReg_192RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 6;
        } else if (AMDGPU::SReg_192RegClass.contains(Reg)) {
          IsSGPR = true;
          Width = 6;
        } else if (AMDGPU::AReg_192RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 6;
        } else if (AMDGPU::SReg_256RegClass.contains(Reg)) {
          assert(!AMDGPU::TTMP_256RegClass.contains(Reg) &&
                 "trap handler registers should not be used");
          IsSGPR = true;
          Width = 8;
        } else if (AMDGPU::VReg_256RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 8;
        } else if (AMDGPU::AReg_256RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 8;
        } else if (AMDGPU::SReg_512RegClass.contains(Reg)) {
          assert(!AMDGPU::TTMP_512RegClass.contains(Reg) &&
                 "trap handler registers should not be used");
          IsSGPR = true;
          Width = 16;
        } else if (AMDGPU::VReg_512RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 16;
        } else if (AMDGPU::AReg_512RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 16;
        } else if (AMDGPU::SReg_1024RegClass.contains(Reg)) {
          IsSGPR = true;
          Width = 32;
        } else if (AMDGPU::VReg_1024RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 32;
        } else if (AMDGPU::AReg_1024RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 32;
        } else {
          llvm_unreachable("Unknown register class");
        }
        unsigned HWReg = TRI.getHWRegIndex(Reg);
        int MaxUsed = HWReg + Width - 1;
        if (IsSGPR) {
          MaxSGPR = MaxUsed > MaxSGPR ? MaxUsed : MaxSGPR;
        } else if (IsAGPR) {
          MaxAGPR = MaxUsed > MaxAGPR ? MaxUsed : MaxAGPR;
        } else {
          MaxVGPR = MaxUsed > MaxVGPR ? MaxUsed : MaxVGPR;
        }
      }

      if (MI.isCall()) {
        // Pseudo used just to encode the underlying global. Is there a better
        // way to track this?

        const MachineOperand *CalleeOp
          = TII->getNamedOperand(MI, AMDGPU::OpName::callee);

        const Function *Callee = getCalleeFunction(*CalleeOp);
        DenseMap<const Function *, SIFunctionResourceInfo>::const_iterator I =
            CallGraphResourceInfo.end();
        bool IsExternal = !Callee || Callee->isDeclaration();
        if (!IsExternal)
          I = CallGraphResourceInfo.find(Callee);

        if (IsExternal || I == CallGraphResourceInfo.end()) {
          // Avoid crashing on undefined behavior with an illegal call to a
          // kernel. If a callsite's calling convention doesn't match the
          // function's, it's undefined behavior. If the callsite calling
          // convention does match, that would have errored earlier.
          // FIXME: The verifier shouldn't allow this.
          if (!IsExternal &&
              AMDGPU::isEntryFunctionCC(Callee->getCallingConv()))
            report_fatal_error("invalid call to entry function");

          unsigned ExtraSGPRs = IsaInfo::getNumExtraSGPRs(
              TM.getMCSubtargetInfo(), false, ST.hasFlatAddressSpace());
          // For calls to external and indirect callees, use the module-wide
          // max SGPR and VGPR counts computed in doInitialization() as the
          // register budget. That max already includes the extra SGPRs
          // (e.g. flatscratch and vcc) that get added back later, so subtract
          // them here to avoid counting them twice.
          MaxSGPR = NonKernelMaxSGPRs - ExtraSGPRs - 1;
          MaxVGPR = NonKernelMaxVGPRs - 1;
          // TODO: handle AGPRs
          MaxAGPR = std::max(MaxAGPR, 23);

          CalleeFrameSize = std::max(CalleeFrameSize,
              static_cast<uint64_t>(AssumedStackSizeForExternalCall));

          Info.UsesVCC = true;
          Info.UsesFlatScratch = ST.hasFlatAddressSpace();
          Info.HasDynamicallySizedStack = true;
        } else {
          // We force CodeGen to run in SCC order, so the callee's register
          // usage etc. should be the cumulative usage of all callees.
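          // The callee's resource info stores register counts; convert back
          // to highest-used indices (count - 1) before merging into the
          // maxima.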
          MaxSGPR = std::max(I->second.NumExplicitSGPR - 1, MaxSGPR);
          MaxVGPR = std::max(I->second.NumVGPR - 1, MaxVGPR);
          MaxAGPR = std::max(I->second.NumAGPR - 1, MaxAGPR);
          CalleeFrameSize
            = std::max(I->second.PrivateSegmentSize, CalleeFrameSize);
          Info.UsesVCC |= I->second.UsesVCC;
          Info.UsesFlatScratch |= I->second.UsesFlatScratch;
          Info.HasDynamicallySizedStack |= I->second.HasDynamicallySizedStack;
          Info.HasRecursion |= I->second.HasRecursion;
        }

        // FIXME: Call site could have norecurse on it
        if (!Callee || !Callee->doesNotRecurse())
          Info.HasRecursion = true;
      }
    }
  }

  Info.NumExplicitSGPR = MaxSGPR + 1;
  Info.NumVGPR = MaxVGPR + 1;
  Info.NumAGPR = MaxAGPR + 1;
  Info.PrivateSegmentSize += CalleeFrameSize;

  return Info;
}

void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
                                        const MachineFunction &MF) {
  SIFunctionResourceInfo Info = analyzeResourceUsage(MF);
  const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();

  ProgInfo.NumArchVGPR = Info.NumVGPR;
  ProgInfo.NumAccVGPR = Info.NumAGPR;
  ProgInfo.NumVGPR = Info.getTotalNumVGPRs(STM);
  ProgInfo.AccumOffset = alignTo(std::max(1, Info.NumVGPR), 4) / 4 - 1;
  ProgInfo.TgSplit = STM.isTgSplitEnabled();
  ProgInfo.NumSGPR = Info.NumExplicitSGPR;
  ProgInfo.ScratchSize = Info.PrivateSegmentSize;
  ProgInfo.VCCUsed = Info.UsesVCC;
  ProgInfo.FlatUsed = Info.UsesFlatScratch;
  ProgInfo.DynamicCallStack = Info.HasDynamicallySizedStack || Info.HasRecursion;

  const uint64_t MaxScratchPerWorkitem =
      GCNSubtarget::MaxWaveScratchSize / STM.getWavefrontSize();
  if (ProgInfo.ScratchSize > MaxScratchPerWorkitem) {
    DiagnosticInfoStackSize DiagStackSize(MF.getFunction(),
                                          ProgInfo.ScratchSize, DS_Error);
    MF.getFunction().getContext().diagnose(DiagStackSize);
  }

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // The calculations related to SGPR/VGPR blocks are
  // duplicated in part in AMDGPUAsmParser::calculateGPRBlocks, and could be
  // unified.
  unsigned ExtraSGPRs = IsaInfo::getNumExtraSGPRs(
      &STM, ProgInfo.VCCUsed, ProgInfo.FlatUsed);

  // Check the addressable register limit before we add ExtraSGPRs.
  if (STM.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS &&
      !STM.hasSGPRInitBug()) {
    unsigned MaxAddressableNumSGPRs = STM.getAddressableNumSGPRs();
    if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) {
      // This can happen due to a compiler bug or when using inline asm.
      LLVMContext &Ctx = MF.getFunction().getContext();
      DiagnosticInfoResourceLimit Diag(MF.getFunction(),
                                       "addressable scalar registers",
                                       ProgInfo.NumSGPR, DS_Error,
                                       DK_ResourceLimit,
                                       MaxAddressableNumSGPRs);
      Ctx.diagnose(Diag);
      ProgInfo.NumSGPR = MaxAddressableNumSGPRs - 1;
    }
  }

  // Account for extra SGPRs and VGPRs reserved for debugger use.
  ProgInfo.NumSGPR += ExtraSGPRs;

  const Function &F = MF.getFunction();

  // Ensure there are enough SGPRs and VGPRs for wave dispatch, where wave
  // dispatch registers are function args.
  unsigned WaveDispatchNumSGPR = 0, WaveDispatchNumVGPR = 0;

  if (isShader(F.getCallingConv())) {
    // FIXME: We should be using the number of registers determined during
    // calling convention lowering to legalize the types.
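    // Each argument is conservatively assumed to need one 32-bit register per
    // 32 bits of its type; e.g. a <4 x float> argument marked inreg counts as
    // (128 + 31) / 32 = 4 SGPRs.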
    const DataLayout &DL = F.getParent()->getDataLayout();
    for (auto &Arg : F.args()) {
      unsigned NumRegs = (DL.getTypeSizeInBits(Arg.getType()) + 31) / 32;
      if (Arg.hasAttribute(Attribute::InReg))
        WaveDispatchNumSGPR += NumRegs;
      else
        WaveDispatchNumVGPR += NumRegs;
    }
    ProgInfo.NumSGPR = std::max(ProgInfo.NumSGPR, WaveDispatchNumSGPR);
    ProgInfo.NumVGPR = std::max(ProgInfo.NumVGPR, WaveDispatchNumVGPR);
  }

  // Adjust number of registers used to meet default/requested minimum/maximum
  // number of waves per execution unit request.
  ProgInfo.NumSGPRsForWavesPerEU = std::max(
      std::max(ProgInfo.NumSGPR, 1u), STM.getMinNumSGPRs(MFI->getMaxWavesPerEU()));
  ProgInfo.NumVGPRsForWavesPerEU = std::max(
      std::max(ProgInfo.NumVGPR, 1u), STM.getMinNumVGPRs(MFI->getMaxWavesPerEU()));

  if (STM.getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS ||
      STM.hasSGPRInitBug()) {
    unsigned MaxAddressableNumSGPRs = STM.getAddressableNumSGPRs();
    if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) {
      // This can happen due to a compiler bug or when using inline asm to use
      // the registers which are usually reserved for vcc etc.
      LLVMContext &Ctx = MF.getFunction().getContext();
      DiagnosticInfoResourceLimit Diag(MF.getFunction(),
                                       "scalar registers",
                                       ProgInfo.NumSGPR, DS_Error,
                                       DK_ResourceLimit,
                                       MaxAddressableNumSGPRs);
      Ctx.diagnose(Diag);
      ProgInfo.NumSGPR = MaxAddressableNumSGPRs;
      ProgInfo.NumSGPRsForWavesPerEU = MaxAddressableNumSGPRs;
    }
  }

  if (STM.hasSGPRInitBug()) {
    ProgInfo.NumSGPR =
        AMDGPU::IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;
    ProgInfo.NumSGPRsForWavesPerEU =
        AMDGPU::IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;
  }

  if (MFI->getNumUserSGPRs() > STM.getMaxNumUserSGPRs()) {
    LLVMContext &Ctx = MF.getFunction().getContext();
    DiagnosticInfoResourceLimit Diag(MF.getFunction(), "user SGPRs",
                                     MFI->getNumUserSGPRs(), DS_Error);
    Ctx.diagnose(Diag);
  }

  if (MFI->getLDSSize() > static_cast<unsigned>(STM.getLocalMemorySize())) {
    LLVMContext &Ctx = MF.getFunction().getContext();
    DiagnosticInfoResourceLimit Diag(MF.getFunction(), "local memory",
                                     MFI->getLDSSize(), DS_Error);
    Ctx.diagnose(Diag);
  }

  ProgInfo.SGPRBlocks = IsaInfo::getNumSGPRBlocks(
      &STM, ProgInfo.NumSGPRsForWavesPerEU);
  ProgInfo.VGPRBlocks = IsaInfo::getNumVGPRBlocks(
      &STM, ProgInfo.NumVGPRsForWavesPerEU);

  const SIModeRegisterDefaults Mode = MFI->getMode();

  // Set the value to initialize FP_ROUND and FP_DENORM parts of the mode
  // register.
  ProgInfo.FloatMode = getFPMode(Mode);

  ProgInfo.IEEEMode = Mode.IEEE;

  // Make clamp modifier on NaN input return 0.
  ProgInfo.DX10Clamp = Mode.DX10Clamp;

  unsigned LDSAlignShift;
  if (STM.getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
    // LDS is allocated in 64 dword blocks.
    LDSAlignShift = 8;
  } else {
    // LDS is allocated in 128 dword blocks.
    LDSAlignShift = 9;
  }

  unsigned LDSSpillSize =
      MFI->getLDSWaveSpillSize() * MFI->getMaxFlatWorkGroupSize();

  ProgInfo.LDSSize = MFI->getLDSSize() + LDSSpillSize;
  ProgInfo.LDSBlocks =
      alignTo(ProgInfo.LDSSize, 1ULL << LDSAlignShift) >> LDSAlignShift;

  // Scratch is allocated in 256 dword blocks.
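  // For example, on a wave64 target a ScratchSize of 1024 bytes per thread is
  // 1024 * 64 = 65536 bytes per wave, which is already a multiple of 1 << 10,
  // giving ScratchBlocks == 64.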
  unsigned ScratchAlignShift = 10;
  // We need to program the hardware with the amount of scratch memory that
  // is used by the entire wave. ProgInfo.ScratchSize is the amount of
  // scratch memory used per thread.
  ProgInfo.ScratchBlocks =
      alignTo(ProgInfo.ScratchSize * STM.getWavefrontSize(),
              1ULL << ScratchAlignShift) >>
      ScratchAlignShift;

  if (getIsaVersion(getGlobalSTI()->getCPU()).Major >= 10) {
    ProgInfo.WgpMode = STM.isCuModeEnabled() ? 0 : 1;
    ProgInfo.MemOrdered = 1;
  }

  // 0 = X, 1 = XY, 2 = XYZ
  unsigned TIDIGCompCnt = 0;
  if (MFI->hasWorkItemIDZ())
    TIDIGCompCnt = 2;
  else if (MFI->hasWorkItemIDY())
    TIDIGCompCnt = 1;

  ProgInfo.ComputePGMRSrc2 =
      S_00B84C_SCRATCH_EN(ProgInfo.ScratchBlocks > 0) |
      S_00B84C_USER_SGPR(MFI->getNumUserSGPRs()) |
      // For AMDHSA, TRAP_HANDLER must be zero, as it is populated by the CP.
      S_00B84C_TRAP_HANDLER(STM.isAmdHsaOS() ? 0 : STM.isTrapHandlerEnabled()) |
      S_00B84C_TGID_X_EN(MFI->hasWorkGroupIDX()) |
      S_00B84C_TGID_Y_EN(MFI->hasWorkGroupIDY()) |
      S_00B84C_TGID_Z_EN(MFI->hasWorkGroupIDZ()) |
      S_00B84C_TG_SIZE_EN(MFI->hasWorkGroupInfo()) |
      S_00B84C_TIDIG_COMP_CNT(TIDIGCompCnt) |
      S_00B84C_EXCP_EN_MSB(0) |
      // For AMDHSA, LDS_SIZE must be zero, as it is populated by the CP.
      S_00B84C_LDS_SIZE(STM.isAmdHsaOS() ? 0 : ProgInfo.LDSBlocks) |
      S_00B84C_EXCP_EN(0);

  if (STM.hasGFX90AInsts()) {
    AMDHSA_BITS_SET(ProgInfo.ComputePGMRSrc3GFX90A,
                    amdhsa::COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET,
                    ProgInfo.AccumOffset);
    AMDHSA_BITS_SET(ProgInfo.ComputePGMRSrc3GFX90A,
                    amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT,
                    ProgInfo.TgSplit);
  }

  ProgInfo.Occupancy = STM.computeOccupancy(MF.getFunction(), ProgInfo.LDSSize,
                                            ProgInfo.NumSGPRsForWavesPerEU,
                                            ProgInfo.NumVGPRsForWavesPerEU);
}

static unsigned getRsrcReg(CallingConv::ID CallConv) {
  switch (CallConv) {
  default: LLVM_FALLTHROUGH;
  case CallingConv::AMDGPU_CS: return R_00B848_COMPUTE_PGM_RSRC1;
  case CallingConv::AMDGPU_LS: return R_00B528_SPI_SHADER_PGM_RSRC1_LS;
  case CallingConv::AMDGPU_HS: return R_00B428_SPI_SHADER_PGM_RSRC1_HS;
  case CallingConv::AMDGPU_ES: return R_00B328_SPI_SHADER_PGM_RSRC1_ES;
  case CallingConv::AMDGPU_GS: return R_00B228_SPI_SHADER_PGM_RSRC1_GS;
  case CallingConv::AMDGPU_VS: return R_00B128_SPI_SHADER_PGM_RSRC1_VS;
  case CallingConv::AMDGPU_PS: return R_00B028_SPI_SHADER_PGM_RSRC1_PS;
  }
}

void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF,
                                         const SIProgramInfo &CurrentProgramInfo) {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  unsigned RsrcReg = getRsrcReg(MF.getFunction().getCallingConv());

  if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
    OutStreamer->emitInt32(R_00B848_COMPUTE_PGM_RSRC1);

    OutStreamer->emitInt32(CurrentProgramInfo.getComputePGMRSrc1());

    OutStreamer->emitInt32(R_00B84C_COMPUTE_PGM_RSRC2);
    OutStreamer->emitInt32(CurrentProgramInfo.ComputePGMRSrc2);

    OutStreamer->emitInt32(R_00B860_COMPUTE_TMPRING_SIZE);
    OutStreamer->emitInt32(S_00B860_WAVESIZE(CurrentProgramInfo.ScratchBlocks));

    // TODO: Should probably note flat usage somewhere. SC emits a "FlatPtr32 =
    // 0" comment but I don't see a corresponding field in the register spec.
  } else {
    OutStreamer->emitInt32(RsrcReg);
    OutStreamer->emitIntValue(S_00B028_VGPRS(CurrentProgramInfo.VGPRBlocks) |
                              S_00B028_SGPRS(CurrentProgramInfo.SGPRBlocks), 4);
    OutStreamer->emitInt32(R_0286E8_SPI_TMPRING_SIZE);
    OutStreamer->emitIntValue(
        S_0286E8_WAVESIZE(CurrentProgramInfo.ScratchBlocks), 4);
  }

  if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS) {
    OutStreamer->emitInt32(R_00B02C_SPI_SHADER_PGM_RSRC2_PS);
    OutStreamer->emitInt32(
        S_00B02C_EXTRA_LDS_SIZE(CurrentProgramInfo.LDSBlocks));
    OutStreamer->emitInt32(R_0286CC_SPI_PS_INPUT_ENA);
    OutStreamer->emitInt32(MFI->getPSInputEnable());
    OutStreamer->emitInt32(R_0286D0_SPI_PS_INPUT_ADDR);
    OutStreamer->emitInt32(MFI->getPSInputAddr());
  }

  OutStreamer->emitInt32(R_SPILLED_SGPRS);
  OutStreamer->emitInt32(MFI->getNumSpilledSGPRs());
  OutStreamer->emitInt32(R_SPILLED_VGPRS);
  OutStreamer->emitInt32(MFI->getNumSpilledVGPRs());
}

// This is the equivalent of EmitProgramInfoSI above, but for when the OS type
// is AMDPAL. It stores each compute/SPI register setting and other PAL
// metadata items into the PALMD::Metadata, combining with any provided by the
// frontend as LLVM metadata. Once all functions are written, the PAL metadata
// is then written as a single block in the .note section.
void AMDGPUAsmPrinter::EmitPALMetadata(const MachineFunction &MF,
                                       const SIProgramInfo &CurrentProgramInfo) {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  auto CC = MF.getFunction().getCallingConv();
  auto MD = getTargetStreamer()->getPALMetadata();

  MD->setEntryPoint(CC, MF.getFunction().getName());
  MD->setNumUsedVgprs(CC, CurrentProgramInfo.NumVGPRsForWavesPerEU);
  MD->setNumUsedSgprs(CC, CurrentProgramInfo.NumSGPRsForWavesPerEU);
  MD->setRsrc1(CC, CurrentProgramInfo.getPGMRSrc1(CC));
  if (AMDGPU::isCompute(CC)) {
    MD->setRsrc2(CC, CurrentProgramInfo.ComputePGMRSrc2);
  } else {
    if (CurrentProgramInfo.ScratchBlocks > 0)
      MD->setRsrc2(CC, S_00B84C_SCRATCH_EN(1));
  }
  // ScratchSize is in bytes, 16 aligned.
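  // For example, a 100 byte scratch frame is reported as
  // alignTo(100, 16) == 112 bytes.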
  MD->setScratchSize(CC, alignTo(CurrentProgramInfo.ScratchSize, 16));
  if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS) {
    MD->setRsrc2(CC, S_00B02C_EXTRA_LDS_SIZE(CurrentProgramInfo.LDSBlocks));
    MD->setSpiPsInputEna(MFI->getPSInputEnable());
    MD->setSpiPsInputAddr(MFI->getPSInputAddr());
  }

  const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
  if (STM.isWave32())
    MD->setWave32(MF.getFunction().getCallingConv());
}

void AMDGPUAsmPrinter::emitPALFunctionMetadata(const MachineFunction &MF) {
  auto *MD = getTargetStreamer()->getPALMetadata();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  MD->setFunctionScratchSize(MF, MFI.getStackSize());
  // Set compute registers.
  MD->setRsrc1(CallingConv::AMDGPU_CS,
               CurrentProgramInfo.getPGMRSrc1(CallingConv::AMDGPU_CS));
  MD->setRsrc2(CallingConv::AMDGPU_CS, CurrentProgramInfo.ComputePGMRSrc2);
}

// This is supposed to be log2(Size).
static amd_element_byte_size_t getElementByteSizeValue(unsigned Size) {
  switch (Size) {
  case 4:
    return AMD_ELEMENT_4_BYTES;
  case 8:
    return AMD_ELEMENT_8_BYTES;
  case 16:
    return AMD_ELEMENT_16_BYTES;
  default:
    llvm_unreachable("invalid private_element_size");
  }
}

void AMDGPUAsmPrinter::getAmdKernelCode(amd_kernel_code_t &Out,
                                        const SIProgramInfo &CurrentProgramInfo,
                                        const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  assert(F.getCallingConv() == CallingConv::AMDGPU_KERNEL ||
         F.getCallingConv() == CallingConv::SPIR_KERNEL);

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();

  AMDGPU::initDefaultAMDKernelCodeT(Out, &STM);

  Out.compute_pgm_resource_registers =
      CurrentProgramInfo.getComputePGMRSrc1() |
      (CurrentProgramInfo.ComputePGMRSrc2 << 32);
  Out.code_properties |= AMD_CODE_PROPERTY_IS_PTR64;

  if (CurrentProgramInfo.DynamicCallStack)
    Out.code_properties |= AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK;

  AMD_HSA_BITS_SET(Out.code_properties,
                   AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE,
                   getElementByteSizeValue(STM.getMaxPrivateElementSize(true)));

  if (MFI->hasPrivateSegmentBuffer()) {
    Out.code_properties |=
        AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER;
  }

  if (MFI->hasDispatchPtr())
    Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR;

  if (MFI->hasQueuePtr())
    Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR;

  if (MFI->hasKernargSegmentPtr())
    Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR;

  if (MFI->hasDispatchID())
    Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID;

  if (MFI->hasFlatScratchInit())
    Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT;

  if (STM.isXNACKEnabled())
    Out.code_properties |= AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED;

  Align MaxKernArgAlign;
  Out.kernarg_segment_byte_size = STM.getKernArgSegmentSize(F, MaxKernArgAlign);
  Out.wavefront_sgpr_count = CurrentProgramInfo.NumSGPR;
  Out.workitem_vgpr_count = CurrentProgramInfo.NumVGPR;
  Out.workitem_private_segment_byte_size = CurrentProgramInfo.ScratchSize;
  Out.workgroup_group_segment_byte_size = CurrentProgramInfo.LDSSize;

  // kernarg_segment_alignment is specified as log of the alignment.
  // The minimum alignment is 16.
  Out.kernarg_segment_alignment = Log2(std::max(Align(16), MaxKernArgAlign));
}

bool AMDGPUAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
                                       const char *ExtraCode, raw_ostream &O) {
  // First try the generic code, which knows about modifiers like 'c' and 'n'.
  if (!AsmPrinter::PrintAsmOperand(MI, OpNo, ExtraCode, O))
    return false;

  if (ExtraCode && ExtraCode[0]) {
    if (ExtraCode[1] != 0)
      return true; // Unknown modifier.

    switch (ExtraCode[0]) {
    case 'r':
      break;
    default:
      return true;
    }
  }

  // TODO: Should be able to support other operand types like globals.
  const MachineOperand &MO = MI->getOperand(OpNo);
  if (MO.isReg()) {
    AMDGPUInstPrinter::printRegOperand(MO.getReg(), O,
                                       *MF->getSubtarget().getRegisterInfo());
    return false;
  } else if (MO.isImm()) {
    int64_t Val = MO.getImm();
    if (AMDGPU::isInlinableIntLiteral(Val)) {
      O << Val;
    } else if (isUInt<16>(Val)) {
      O << format("0x%" PRIx16, static_cast<uint16_t>(Val));
    } else if (isUInt<32>(Val)) {
      O << format("0x%" PRIx32, static_cast<uint32_t>(Val));
    } else {
      O << format("0x%" PRIx64, static_cast<uint64_t>(Val));
    }
    return false;
  }
  return true;
}