1 //===-- AMDGPUAsmPrinter.cpp - AMDGPU Assebly printer --------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 /// \file 11 /// 12 /// The AMDGPUAsmPrinter is used to print both assembly string and also binary 13 /// code. When passed an MCAsmStreamer it prints assembly and when passed 14 /// an MCObjectStreamer it outputs binary code. 15 // 16 //===----------------------------------------------------------------------===// 17 // 18 19 #include "AMDGPUAsmPrinter.h" 20 #include "MCTargetDesc/AMDGPUTargetStreamer.h" 21 #include "InstPrinter/AMDGPUInstPrinter.h" 22 #include "Utils/AMDGPUBaseInfo.h" 23 #include "AMDGPU.h" 24 #include "AMDKernelCodeT.h" 25 #include "AMDGPUSubtarget.h" 26 #include "R600Defines.h" 27 #include "R600MachineFunctionInfo.h" 28 #include "R600RegisterInfo.h" 29 #include "SIDefines.h" 30 #include "SIMachineFunctionInfo.h" 31 #include "SIInstrInfo.h" 32 #include "SIRegisterInfo.h" 33 #include "llvm/CodeGen/MachineFrameInfo.h" 34 #include "llvm/IR/DiagnosticInfo.h" 35 #include "llvm/MC/MCContext.h" 36 #include "llvm/MC/MCSectionELF.h" 37 #include "llvm/MC/MCStreamer.h" 38 #include "llvm/Support/ELF.h" 39 #include "llvm/Support/MathExtras.h" 40 #include "llvm/Support/TargetRegistry.h" 41 #include "llvm/Target/TargetLoweringObjectFile.h" 42 43 using namespace llvm; 44 45 // TODO: This should get the default rounding mode from the kernel. We just set 46 // the default here, but this could change if the OpenCL rounding mode pragmas 47 // are used. 48 // 49 // The denormal mode here should match what is reported by the OpenCL runtime 50 // for the CL_FP_DENORM bit from CL_DEVICE_{HALF|SINGLE|DOUBLE}_FP_CONFIG, but 51 // can also be override to flush with the -cl-denorms-are-zero compiler flag. 52 // 53 // AMD OpenCL only sets flush none and reports CL_FP_DENORM for double 54 // precision, and leaves single precision to flush all and does not report 55 // CL_FP_DENORM for CL_DEVICE_SINGLE_FP_CONFIG. Mesa's OpenCL currently reports 56 // CL_FP_DENORM for both. 57 // 58 // FIXME: It seems some instructions do not support single precision denormals 59 // regardless of the mode (exp_*_f32, rcp_*_f32, rsq_*_f32, rsq_*f32, sqrt_f32, 60 // and sin_f32, cos_f32 on most parts). 61 62 // We want to use these instructions, and using fp32 denormals also causes 63 // instructions to run at the double precision rate for the device so it's 64 // probably best to just report no single precision denormals. 65 static uint32_t getFPMode(const MachineFunction &F) { 66 const SISubtarget& ST = F.getSubtarget<SISubtarget>(); 67 // TODO: Is there any real use for the flush in only / flush out only modes? 68 69 uint32_t FP32Denormals = 70 ST.hasFP32Denormals() ? FP_DENORM_FLUSH_NONE : FP_DENORM_FLUSH_IN_FLUSH_OUT; 71 72 uint32_t FP64Denormals = 73 ST.hasFP64Denormals() ? FP_DENORM_FLUSH_NONE : FP_DENORM_FLUSH_IN_FLUSH_OUT; 74 75 return FP_ROUND_MODE_SP(FP_ROUND_ROUND_TO_NEAREST) | 76 FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_NEAREST) | 77 FP_DENORM_MODE_SP(FP32Denormals) | 78 FP_DENORM_MODE_DP(FP64Denormals); 79 } 80 81 static AsmPrinter * 82 createAMDGPUAsmPrinterPass(TargetMachine &tm, 83 std::unique_ptr<MCStreamer> &&Streamer) { 84 return new AMDGPUAsmPrinter(tm, std::move(Streamer)); 85 } 86 87 extern "C" void LLVMInitializeAMDGPUAsmPrinter() { 88 TargetRegistry::RegisterAsmPrinter(getTheAMDGPUTarget(), 89 createAMDGPUAsmPrinterPass); 90 TargetRegistry::RegisterAsmPrinter(getTheGCNTarget(), 91 createAMDGPUAsmPrinterPass); 92 } 93 94 AMDGPUAsmPrinter::AMDGPUAsmPrinter(TargetMachine &TM, 95 std::unique_ptr<MCStreamer> Streamer) 96 : AsmPrinter(TM, std::move(Streamer)) {} 97 98 StringRef AMDGPUAsmPrinter::getPassName() const { 99 return "AMDGPU Assembly Printer"; 100 } 101 102 void AMDGPUAsmPrinter::EmitStartOfAsmFile(Module &M) { 103 if (TM.getTargetTriple().getOS() != Triple::AMDHSA) 104 return; 105 106 // Need to construct an MCSubtargetInfo here in case we have no functions 107 // in the module. 108 std::unique_ptr<MCSubtargetInfo> STI(TM.getTarget().createMCSubtargetInfo( 109 TM.getTargetTriple().str(), TM.getTargetCPU(), 110 TM.getTargetFeatureString())); 111 112 AMDGPUTargetStreamer *TS = 113 static_cast<AMDGPUTargetStreamer *>(OutStreamer->getTargetStreamer()); 114 115 TS->EmitDirectiveHSACodeObjectVersion(2, 1); 116 117 AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(STI->getFeatureBits()); 118 TS->EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor, ISA.Stepping, 119 "AMD", "AMDGPU"); 120 121 // Emit runtime metadata. 122 TS->emitRuntimeMetadata(M); 123 } 124 125 bool AMDGPUAsmPrinter::isBlockOnlyReachableByFallthrough( 126 const MachineBasicBlock *MBB) const { 127 if (!AsmPrinter::isBlockOnlyReachableByFallthrough(MBB)) 128 return false; 129 130 if (MBB->empty()) 131 return true; 132 133 // If this is a block implementing a long branch, an expression relative to 134 // the start of the block is needed. to the start of the block. 135 // XXX - Is there a smarter way to check this? 136 return (MBB->back().getOpcode() != AMDGPU::S_SETPC_B64); 137 } 138 139 140 void AMDGPUAsmPrinter::EmitFunctionBodyStart() { 141 const AMDGPUSubtarget &STM = MF->getSubtarget<AMDGPUSubtarget>(); 142 SIProgramInfo KernelInfo; 143 if (STM.isAmdCodeObjectV2()) { 144 getSIProgramInfo(KernelInfo, *MF); 145 EmitAmdKernelCodeT(*MF, KernelInfo); 146 } 147 } 148 149 void AMDGPUAsmPrinter::EmitFunctionEntryLabel() { 150 const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 151 const AMDGPUSubtarget &STM = MF->getSubtarget<AMDGPUSubtarget>(); 152 if (MFI->isKernel() && STM.isAmdCodeObjectV2()) { 153 AMDGPUTargetStreamer *TS = 154 static_cast<AMDGPUTargetStreamer *>(OutStreamer->getTargetStreamer()); 155 SmallString<128> SymbolName; 156 getNameWithPrefix(SymbolName, MF->getFunction()), 157 TS->EmitAMDGPUSymbolType(SymbolName, ELF::STT_AMDGPU_HSA_KERNEL); 158 } 159 160 AsmPrinter::EmitFunctionEntryLabel(); 161 } 162 163 void AMDGPUAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) { 164 165 // Group segment variables aren't emitted in HSA. 166 if (AMDGPU::isGroupSegment(GV)) 167 return; 168 169 AsmPrinter::EmitGlobalVariable(GV); 170 } 171 172 bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) { 173 174 // The starting address of all shader programs must be 256 bytes aligned. 175 MF.setAlignment(8); 176 177 SetupMachineFunction(MF); 178 179 MCContext &Context = getObjFileLowering().getContext(); 180 MCSectionELF *ConfigSection = 181 Context.getELFSection(".AMDGPU.config", ELF::SHT_PROGBITS, 0); 182 OutStreamer->SwitchSection(ConfigSection); 183 184 const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>(); 185 SIProgramInfo KernelInfo; 186 if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) { 187 getSIProgramInfo(KernelInfo, MF); 188 if (!STM.isAmdHsaOS()) { 189 EmitProgramInfoSI(MF, KernelInfo); 190 } 191 } else { 192 EmitProgramInfoR600(MF); 193 } 194 195 DisasmLines.clear(); 196 HexLines.clear(); 197 DisasmLineMaxLen = 0; 198 199 EmitFunctionBody(); 200 201 if (isVerbose()) { 202 MCSectionELF *CommentSection = 203 Context.getELFSection(".AMDGPU.csdata", ELF::SHT_PROGBITS, 0); 204 OutStreamer->SwitchSection(CommentSection); 205 206 if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) { 207 OutStreamer->emitRawComment(" Kernel info:", false); 208 OutStreamer->emitRawComment(" codeLenInByte = " + Twine(KernelInfo.CodeLen), 209 false); 210 OutStreamer->emitRawComment(" NumSgprs: " + Twine(KernelInfo.NumSGPR), 211 false); 212 OutStreamer->emitRawComment(" NumVgprs: " + Twine(KernelInfo.NumVGPR), 213 false); 214 OutStreamer->emitRawComment(" FloatMode: " + Twine(KernelInfo.FloatMode), 215 false); 216 OutStreamer->emitRawComment(" IeeeMode: " + Twine(KernelInfo.IEEEMode), 217 false); 218 OutStreamer->emitRawComment(" ScratchSize: " + Twine(KernelInfo.ScratchSize), 219 false); 220 OutStreamer->emitRawComment(" LDSByteSize: " + Twine(KernelInfo.LDSSize) + 221 " bytes/workgroup (compile time only)", false); 222 223 OutStreamer->emitRawComment(" SGPRBlocks: " + 224 Twine(KernelInfo.SGPRBlocks), false); 225 OutStreamer->emitRawComment(" VGPRBlocks: " + 226 Twine(KernelInfo.VGPRBlocks), false); 227 228 OutStreamer->emitRawComment(" NumSGPRsForWavesPerEU: " + 229 Twine(KernelInfo.NumSGPRsForWavesPerEU), false); 230 OutStreamer->emitRawComment(" NumVGPRsForWavesPerEU: " + 231 Twine(KernelInfo.NumVGPRsForWavesPerEU), false); 232 233 OutStreamer->emitRawComment(" ReservedVGPRFirst: " + Twine(KernelInfo.ReservedVGPRFirst), 234 false); 235 OutStreamer->emitRawComment(" ReservedVGPRCount: " + Twine(KernelInfo.ReservedVGPRCount), 236 false); 237 238 if (MF.getSubtarget<SISubtarget>().debuggerEmitPrologue()) { 239 OutStreamer->emitRawComment(" DebuggerWavefrontPrivateSegmentOffsetSGPR: s" + 240 Twine(KernelInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR), false); 241 OutStreamer->emitRawComment(" DebuggerPrivateSegmentBufferSGPR: s" + 242 Twine(KernelInfo.DebuggerPrivateSegmentBufferSGPR), false); 243 } 244 245 OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:USER_SGPR: " + 246 Twine(G_00B84C_USER_SGPR(KernelInfo.ComputePGMRSrc2)), 247 false); 248 OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:TGID_X_EN: " + 249 Twine(G_00B84C_TGID_X_EN(KernelInfo.ComputePGMRSrc2)), 250 false); 251 OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:TGID_Y_EN: " + 252 Twine(G_00B84C_TGID_Y_EN(KernelInfo.ComputePGMRSrc2)), 253 false); 254 OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:TGID_Z_EN: " + 255 Twine(G_00B84C_TGID_Z_EN(KernelInfo.ComputePGMRSrc2)), 256 false); 257 OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:TIDIG_COMP_CNT: " + 258 Twine(G_00B84C_TIDIG_COMP_CNT(KernelInfo.ComputePGMRSrc2)), 259 false); 260 261 } else { 262 R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>(); 263 OutStreamer->emitRawComment( 264 Twine("SQ_PGM_RESOURCES:STACK_SIZE = " + Twine(MFI->CFStackSize))); 265 } 266 } 267 268 if (STM.dumpCode()) { 269 270 OutStreamer->SwitchSection( 271 Context.getELFSection(".AMDGPU.disasm", ELF::SHT_NOTE, 0)); 272 273 for (size_t i = 0; i < DisasmLines.size(); ++i) { 274 std::string Comment(DisasmLineMaxLen - DisasmLines[i].size(), ' '); 275 Comment += " ; " + HexLines[i] + "\n"; 276 277 OutStreamer->EmitBytes(StringRef(DisasmLines[i])); 278 OutStreamer->EmitBytes(StringRef(Comment)); 279 } 280 } 281 282 return false; 283 } 284 285 void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) { 286 unsigned MaxGPR = 0; 287 bool killPixel = false; 288 const R600Subtarget &STM = MF.getSubtarget<R600Subtarget>(); 289 const R600RegisterInfo *RI = STM.getRegisterInfo(); 290 const R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>(); 291 292 for (const MachineBasicBlock &MBB : MF) { 293 for (const MachineInstr &MI : MBB) { 294 if (MI.getOpcode() == AMDGPU::KILLGT) 295 killPixel = true; 296 unsigned numOperands = MI.getNumOperands(); 297 for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) { 298 const MachineOperand &MO = MI.getOperand(op_idx); 299 if (!MO.isReg()) 300 continue; 301 unsigned HWReg = RI->getEncodingValue(MO.getReg()) & 0xff; 302 303 // Register with value > 127 aren't GPR 304 if (HWReg > 127) 305 continue; 306 MaxGPR = std::max(MaxGPR, HWReg); 307 } 308 } 309 } 310 311 unsigned RsrcReg; 312 if (STM.getGeneration() >= R600Subtarget::EVERGREEN) { 313 // Evergreen / Northern Islands 314 switch (MF.getFunction()->getCallingConv()) { 315 default: LLVM_FALLTHROUGH; 316 case CallingConv::AMDGPU_CS: RsrcReg = R_0288D4_SQ_PGM_RESOURCES_LS; break; 317 case CallingConv::AMDGPU_GS: RsrcReg = R_028878_SQ_PGM_RESOURCES_GS; break; 318 case CallingConv::AMDGPU_PS: RsrcReg = R_028844_SQ_PGM_RESOURCES_PS; break; 319 case CallingConv::AMDGPU_VS: RsrcReg = R_028860_SQ_PGM_RESOURCES_VS; break; 320 } 321 } else { 322 // R600 / R700 323 switch (MF.getFunction()->getCallingConv()) { 324 default: LLVM_FALLTHROUGH; 325 case CallingConv::AMDGPU_GS: LLVM_FALLTHROUGH; 326 case CallingConv::AMDGPU_CS: LLVM_FALLTHROUGH; 327 case CallingConv::AMDGPU_VS: RsrcReg = R_028868_SQ_PGM_RESOURCES_VS; break; 328 case CallingConv::AMDGPU_PS: RsrcReg = R_028850_SQ_PGM_RESOURCES_PS; break; 329 } 330 } 331 332 OutStreamer->EmitIntValue(RsrcReg, 4); 333 OutStreamer->EmitIntValue(S_NUM_GPRS(MaxGPR + 1) | 334 S_STACK_SIZE(MFI->CFStackSize), 4); 335 OutStreamer->EmitIntValue(R_02880C_DB_SHADER_CONTROL, 4); 336 OutStreamer->EmitIntValue(S_02880C_KILL_ENABLE(killPixel), 4); 337 338 if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) { 339 OutStreamer->EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4); 340 OutStreamer->EmitIntValue(alignTo(MFI->getLDSSize(), 4) >> 2, 4); 341 } 342 } 343 344 void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo, 345 const MachineFunction &MF) const { 346 const SISubtarget &STM = MF.getSubtarget<SISubtarget>(); 347 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 348 uint64_t CodeSize = 0; 349 unsigned MaxSGPR = 0; 350 unsigned MaxVGPR = 0; 351 bool VCCUsed = false; 352 bool FlatUsed = false; 353 const SIRegisterInfo *RI = STM.getRegisterInfo(); 354 const SIInstrInfo *TII = STM.getInstrInfo(); 355 356 for (const MachineBasicBlock &MBB : MF) { 357 for (const MachineInstr &MI : MBB) { 358 // TODO: CodeSize should account for multiple functions. 359 360 // TODO: Should we count size of debug info? 361 if (MI.isDebugValue()) 362 continue; 363 364 if (isVerbose()) 365 CodeSize += TII->getInstSizeInBytes(MI); 366 367 unsigned numOperands = MI.getNumOperands(); 368 for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) { 369 const MachineOperand &MO = MI.getOperand(op_idx); 370 unsigned width = 0; 371 bool isSGPR = false; 372 373 if (!MO.isReg()) 374 continue; 375 376 unsigned reg = MO.getReg(); 377 switch (reg) { 378 case AMDGPU::EXEC: 379 case AMDGPU::EXEC_LO: 380 case AMDGPU::EXEC_HI: 381 case AMDGPU::SCC: 382 case AMDGPU::M0: 383 continue; 384 385 case AMDGPU::VCC: 386 case AMDGPU::VCC_LO: 387 case AMDGPU::VCC_HI: 388 VCCUsed = true; 389 continue; 390 391 case AMDGPU::FLAT_SCR: 392 case AMDGPU::FLAT_SCR_LO: 393 case AMDGPU::FLAT_SCR_HI: 394 // Even if FLAT_SCRATCH is implicitly used, it has no effect if flat 395 // instructions aren't used to access the scratch buffer. 396 if (MFI->hasFlatScratchInit()) 397 FlatUsed = true; 398 continue; 399 400 case AMDGPU::TBA: 401 case AMDGPU::TBA_LO: 402 case AMDGPU::TBA_HI: 403 case AMDGPU::TMA: 404 case AMDGPU::TMA_LO: 405 case AMDGPU::TMA_HI: 406 llvm_unreachable("trap handler registers should not be used"); 407 408 default: 409 break; 410 } 411 412 if (AMDGPU::SReg_32RegClass.contains(reg)) { 413 assert(!AMDGPU::TTMP_32RegClass.contains(reg) && 414 "trap handler registers should not be used"); 415 isSGPR = true; 416 width = 1; 417 } else if (AMDGPU::VGPR_32RegClass.contains(reg)) { 418 isSGPR = false; 419 width = 1; 420 } else if (AMDGPU::SReg_64RegClass.contains(reg)) { 421 assert(!AMDGPU::TTMP_64RegClass.contains(reg) && 422 "trap handler registers should not be used"); 423 isSGPR = true; 424 width = 2; 425 } else if (AMDGPU::VReg_64RegClass.contains(reg)) { 426 isSGPR = false; 427 width = 2; 428 } else if (AMDGPU::VReg_96RegClass.contains(reg)) { 429 isSGPR = false; 430 width = 3; 431 } else if (AMDGPU::SReg_128RegClass.contains(reg)) { 432 isSGPR = true; 433 width = 4; 434 } else if (AMDGPU::VReg_128RegClass.contains(reg)) { 435 isSGPR = false; 436 width = 4; 437 } else if (AMDGPU::SReg_256RegClass.contains(reg)) { 438 isSGPR = true; 439 width = 8; 440 } else if (AMDGPU::VReg_256RegClass.contains(reg)) { 441 isSGPR = false; 442 width = 8; 443 } else if (AMDGPU::SReg_512RegClass.contains(reg)) { 444 isSGPR = true; 445 width = 16; 446 } else if (AMDGPU::VReg_512RegClass.contains(reg)) { 447 isSGPR = false; 448 width = 16; 449 } else { 450 llvm_unreachable("Unknown register class"); 451 } 452 unsigned hwReg = RI->getEncodingValue(reg) & 0xff; 453 unsigned maxUsed = hwReg + width - 1; 454 if (isSGPR) { 455 MaxSGPR = maxUsed > MaxSGPR ? maxUsed : MaxSGPR; 456 } else { 457 MaxVGPR = maxUsed > MaxVGPR ? maxUsed : MaxVGPR; 458 } 459 } 460 } 461 } 462 463 unsigned ExtraSGPRs = 0; 464 465 if (VCCUsed) 466 ExtraSGPRs = 2; 467 468 if (STM.getGeneration() < SISubtarget::VOLCANIC_ISLANDS) { 469 if (FlatUsed) 470 ExtraSGPRs = 4; 471 } else { 472 if (STM.isXNACKEnabled()) 473 ExtraSGPRs = 4; 474 475 if (FlatUsed) 476 ExtraSGPRs = 6; 477 } 478 479 // Record first reserved register and reserved register count fields, and 480 // update max register counts if "amdgpu-debugger-reserve-regs" attribute was 481 // requested. 482 ProgInfo.ReservedVGPRFirst = STM.debuggerReserveRegs() ? MaxVGPR + 1 : 0; 483 ProgInfo.ReservedVGPRCount = RI->getNumDebuggerReservedVGPRs(STM); 484 485 // Update DebuggerWavefrontPrivateSegmentOffsetSGPR and 486 // DebuggerPrivateSegmentBufferSGPR fields if "amdgpu-debugger-emit-prologue" 487 // attribute was requested. 488 if (STM.debuggerEmitPrologue()) { 489 ProgInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR = 490 RI->getHWRegIndex(MFI->getScratchWaveOffsetReg()); 491 ProgInfo.DebuggerPrivateSegmentBufferSGPR = 492 RI->getHWRegIndex(MFI->getScratchRSrcReg()); 493 } 494 495 // Check the addressable register limit before we add ExtraSGPRs. 496 if (STM.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS && 497 !STM.hasSGPRInitBug()) { 498 unsigned MaxAddressableNumSGPRs = STM.getMaxNumSGPRs(); 499 if (MaxSGPR + 1 > MaxAddressableNumSGPRs) { 500 // This can happen due to a compiler bug or when using inline asm. 501 LLVMContext &Ctx = MF.getFunction()->getContext(); 502 DiagnosticInfoResourceLimit Diag(*MF.getFunction(), 503 "addressable scalar registers", 504 MaxSGPR + 1, DS_Error, 505 DK_ResourceLimit, MaxAddressableNumSGPRs); 506 Ctx.diagnose(Diag); 507 MaxSGPR = MaxAddressableNumSGPRs - 1; 508 } 509 } 510 511 // Account for extra SGPRs and VGPRs reserved for debugger use. 512 MaxSGPR += ExtraSGPRs; 513 MaxVGPR += RI->getNumDebuggerReservedVGPRs(STM); 514 515 // We found the maximum register index. They start at 0, so add one to get the 516 // number of registers. 517 ProgInfo.NumVGPR = MaxVGPR + 1; 518 ProgInfo.NumSGPR = MaxSGPR + 1; 519 520 // Adjust number of registers used to meet default/requested minimum/maximum 521 // number of waves per execution unit request. 522 ProgInfo.NumSGPRsForWavesPerEU = std::max( 523 ProgInfo.NumSGPR, RI->getMinNumSGPRs(STM, MFI->getMaxWavesPerEU())); 524 ProgInfo.NumVGPRsForWavesPerEU = std::max( 525 ProgInfo.NumVGPR, RI->getMinNumVGPRs(MFI->getMaxWavesPerEU())); 526 527 if (STM.getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS || 528 STM.hasSGPRInitBug()) { 529 unsigned MaxNumSGPRs = STM.getMaxNumSGPRs(); 530 if (ProgInfo.NumSGPR > MaxNumSGPRs) { 531 // This can happen due to a compiler bug or when using inline asm to use the 532 // registers which are usually reserved for vcc etc. 533 534 LLVMContext &Ctx = MF.getFunction()->getContext(); 535 DiagnosticInfoResourceLimit Diag(*MF.getFunction(), 536 "scalar registers", 537 ProgInfo.NumSGPR, DS_Error, 538 DK_ResourceLimit, MaxNumSGPRs); 539 Ctx.diagnose(Diag); 540 ProgInfo.NumSGPR = MaxNumSGPRs; 541 ProgInfo.NumSGPRsForWavesPerEU = MaxNumSGPRs; 542 } 543 } 544 545 if (STM.hasSGPRInitBug()) { 546 ProgInfo.NumSGPR = SISubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG; 547 ProgInfo.NumSGPRsForWavesPerEU = SISubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG; 548 } 549 550 if (MFI->NumUserSGPRs > STM.getMaxNumUserSGPRs()) { 551 LLVMContext &Ctx = MF.getFunction()->getContext(); 552 DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "user SGPRs", 553 MFI->NumUserSGPRs, DS_Error); 554 Ctx.diagnose(Diag); 555 } 556 557 if (MFI->getLDSSize() > static_cast<unsigned>(STM.getLocalMemorySize())) { 558 LLVMContext &Ctx = MF.getFunction()->getContext(); 559 DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "local memory", 560 MFI->getLDSSize(), DS_Error); 561 Ctx.diagnose(Diag); 562 } 563 564 // SGPRBlocks is actual number of SGPR blocks minus 1. 565 ProgInfo.SGPRBlocks = alignTo(ProgInfo.NumSGPRsForWavesPerEU, 566 RI->getSGPRAllocGranule()); 567 ProgInfo.SGPRBlocks = ProgInfo.SGPRBlocks / RI->getSGPRAllocGranule() - 1; 568 569 // VGPRBlocks is actual number of VGPR blocks minus 1. 570 ProgInfo.VGPRBlocks = alignTo(ProgInfo.NumVGPRsForWavesPerEU, 571 RI->getVGPRAllocGranule()); 572 ProgInfo.VGPRBlocks = ProgInfo.VGPRBlocks / RI->getVGPRAllocGranule() - 1; 573 574 // Set the value to initialize FP_ROUND and FP_DENORM parts of the mode 575 // register. 576 ProgInfo.FloatMode = getFPMode(MF); 577 578 ProgInfo.IEEEMode = STM.enableIEEEBit(MF); 579 580 // Make clamp modifier on NaN input returns 0. 581 ProgInfo.DX10Clamp = 1; 582 583 const MachineFrameInfo &FrameInfo = MF.getFrameInfo(); 584 ProgInfo.ScratchSize = FrameInfo.getStackSize(); 585 586 ProgInfo.FlatUsed = FlatUsed; 587 ProgInfo.VCCUsed = VCCUsed; 588 ProgInfo.CodeLen = CodeSize; 589 590 unsigned LDSAlignShift; 591 if (STM.getGeneration() < SISubtarget::SEA_ISLANDS) { 592 // LDS is allocated in 64 dword blocks. 593 LDSAlignShift = 8; 594 } else { 595 // LDS is allocated in 128 dword blocks. 596 LDSAlignShift = 9; 597 } 598 599 unsigned LDSSpillSize = 600 MFI->LDSWaveSpillSize * MFI->getMaxFlatWorkGroupSize(); 601 602 ProgInfo.LDSSize = MFI->getLDSSize() + LDSSpillSize; 603 ProgInfo.LDSBlocks = 604 alignTo(ProgInfo.LDSSize, 1ULL << LDSAlignShift) >> LDSAlignShift; 605 606 // Scratch is allocated in 256 dword blocks. 607 unsigned ScratchAlignShift = 10; 608 // We need to program the hardware with the amount of scratch memory that 609 // is used by the entire wave. ProgInfo.ScratchSize is the amount of 610 // scratch memory used per thread. 611 ProgInfo.ScratchBlocks = 612 alignTo(ProgInfo.ScratchSize * STM.getWavefrontSize(), 613 1ULL << ScratchAlignShift) >> 614 ScratchAlignShift; 615 616 ProgInfo.ComputePGMRSrc1 = 617 S_00B848_VGPRS(ProgInfo.VGPRBlocks) | 618 S_00B848_SGPRS(ProgInfo.SGPRBlocks) | 619 S_00B848_PRIORITY(ProgInfo.Priority) | 620 S_00B848_FLOAT_MODE(ProgInfo.FloatMode) | 621 S_00B848_PRIV(ProgInfo.Priv) | 622 S_00B848_DX10_CLAMP(ProgInfo.DX10Clamp) | 623 S_00B848_DEBUG_MODE(ProgInfo.DebugMode) | 624 S_00B848_IEEE_MODE(ProgInfo.IEEEMode); 625 626 // 0 = X, 1 = XY, 2 = XYZ 627 unsigned TIDIGCompCnt = 0; 628 if (MFI->hasWorkItemIDZ()) 629 TIDIGCompCnt = 2; 630 else if (MFI->hasWorkItemIDY()) 631 TIDIGCompCnt = 1; 632 633 ProgInfo.ComputePGMRSrc2 = 634 S_00B84C_SCRATCH_EN(ProgInfo.ScratchBlocks > 0) | 635 S_00B84C_USER_SGPR(MFI->getNumUserSGPRs()) | 636 S_00B84C_TGID_X_EN(MFI->hasWorkGroupIDX()) | 637 S_00B84C_TGID_Y_EN(MFI->hasWorkGroupIDY()) | 638 S_00B84C_TGID_Z_EN(MFI->hasWorkGroupIDZ()) | 639 S_00B84C_TG_SIZE_EN(MFI->hasWorkGroupInfo()) | 640 S_00B84C_TIDIG_COMP_CNT(TIDIGCompCnt) | 641 S_00B84C_EXCP_EN_MSB(0) | 642 S_00B84C_LDS_SIZE(ProgInfo.LDSBlocks) | 643 S_00B84C_EXCP_EN(0); 644 } 645 646 static unsigned getRsrcReg(CallingConv::ID CallConv) { 647 switch (CallConv) { 648 default: LLVM_FALLTHROUGH; 649 case CallingConv::AMDGPU_CS: return R_00B848_COMPUTE_PGM_RSRC1; 650 case CallingConv::AMDGPU_GS: return R_00B228_SPI_SHADER_PGM_RSRC1_GS; 651 case CallingConv::AMDGPU_PS: return R_00B028_SPI_SHADER_PGM_RSRC1_PS; 652 case CallingConv::AMDGPU_VS: return R_00B128_SPI_SHADER_PGM_RSRC1_VS; 653 } 654 } 655 656 void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF, 657 const SIProgramInfo &KernelInfo) { 658 const SISubtarget &STM = MF.getSubtarget<SISubtarget>(); 659 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 660 unsigned RsrcReg = getRsrcReg(MF.getFunction()->getCallingConv()); 661 662 if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) { 663 OutStreamer->EmitIntValue(R_00B848_COMPUTE_PGM_RSRC1, 4); 664 665 OutStreamer->EmitIntValue(KernelInfo.ComputePGMRSrc1, 4); 666 667 OutStreamer->EmitIntValue(R_00B84C_COMPUTE_PGM_RSRC2, 4); 668 OutStreamer->EmitIntValue(KernelInfo.ComputePGMRSrc2, 4); 669 670 OutStreamer->EmitIntValue(R_00B860_COMPUTE_TMPRING_SIZE, 4); 671 OutStreamer->EmitIntValue(S_00B860_WAVESIZE(KernelInfo.ScratchBlocks), 4); 672 673 // TODO: Should probably note flat usage somewhere. SC emits a "FlatPtr32 = 674 // 0" comment but I don't see a corresponding field in the register spec. 675 } else { 676 OutStreamer->EmitIntValue(RsrcReg, 4); 677 OutStreamer->EmitIntValue(S_00B028_VGPRS(KernelInfo.VGPRBlocks) | 678 S_00B028_SGPRS(KernelInfo.SGPRBlocks), 4); 679 if (STM.isVGPRSpillingEnabled(*MF.getFunction())) { 680 OutStreamer->EmitIntValue(R_0286E8_SPI_TMPRING_SIZE, 4); 681 OutStreamer->EmitIntValue(S_0286E8_WAVESIZE(KernelInfo.ScratchBlocks), 4); 682 } 683 } 684 685 if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_PS) { 686 OutStreamer->EmitIntValue(R_00B02C_SPI_SHADER_PGM_RSRC2_PS, 4); 687 OutStreamer->EmitIntValue(S_00B02C_EXTRA_LDS_SIZE(KernelInfo.LDSBlocks), 4); 688 OutStreamer->EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4); 689 OutStreamer->EmitIntValue(MFI->PSInputEna, 4); 690 OutStreamer->EmitIntValue(R_0286D0_SPI_PS_INPUT_ADDR, 4); 691 OutStreamer->EmitIntValue(MFI->getPSInputAddr(), 4); 692 } 693 694 OutStreamer->EmitIntValue(R_SPILLED_SGPRS, 4); 695 OutStreamer->EmitIntValue(MFI->getNumSpilledSGPRs(), 4); 696 OutStreamer->EmitIntValue(R_SPILLED_VGPRS, 4); 697 OutStreamer->EmitIntValue(MFI->getNumSpilledVGPRs(), 4); 698 } 699 700 // This is supposed to be log2(Size) 701 static amd_element_byte_size_t getElementByteSizeValue(unsigned Size) { 702 switch (Size) { 703 case 4: 704 return AMD_ELEMENT_4_BYTES; 705 case 8: 706 return AMD_ELEMENT_8_BYTES; 707 case 16: 708 return AMD_ELEMENT_16_BYTES; 709 default: 710 llvm_unreachable("invalid private_element_size"); 711 } 712 } 713 714 void AMDGPUAsmPrinter::EmitAmdKernelCodeT(const MachineFunction &MF, 715 const SIProgramInfo &KernelInfo) const { 716 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 717 const SISubtarget &STM = MF.getSubtarget<SISubtarget>(); 718 amd_kernel_code_t header; 719 720 AMDGPU::initDefaultAMDKernelCodeT(header, STM.getFeatureBits()); 721 722 header.compute_pgm_resource_registers = 723 KernelInfo.ComputePGMRSrc1 | 724 (KernelInfo.ComputePGMRSrc2 << 32); 725 header.code_properties = AMD_CODE_PROPERTY_IS_PTR64; 726 727 728 AMD_HSA_BITS_SET(header.code_properties, 729 AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE, 730 getElementByteSizeValue(STM.getMaxPrivateElementSize())); 731 732 if (MFI->hasPrivateSegmentBuffer()) { 733 header.code_properties |= 734 AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER; 735 } 736 737 if (MFI->hasDispatchPtr()) 738 header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR; 739 740 if (MFI->hasQueuePtr()) 741 header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR; 742 743 if (MFI->hasKernargSegmentPtr()) 744 header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR; 745 746 if (MFI->hasDispatchID()) 747 header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID; 748 749 if (MFI->hasFlatScratchInit()) 750 header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT; 751 752 // TODO: Private segment size 753 754 if (MFI->hasGridWorkgroupCountX()) { 755 header.code_properties |= 756 AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X; 757 } 758 759 if (MFI->hasGridWorkgroupCountY()) { 760 header.code_properties |= 761 AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y; 762 } 763 764 if (MFI->hasGridWorkgroupCountZ()) { 765 header.code_properties |= 766 AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z; 767 } 768 769 if (MFI->hasDispatchPtr()) 770 header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR; 771 772 if (STM.debuggerSupported()) 773 header.code_properties |= AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED; 774 775 if (STM.isXNACKEnabled()) 776 header.code_properties |= AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED; 777 778 // FIXME: Should use getKernArgSize 779 header.kernarg_segment_byte_size = 780 STM.getKernArgSegmentSize(MFI->getABIArgOffset()); 781 header.wavefront_sgpr_count = KernelInfo.NumSGPR; 782 header.workitem_vgpr_count = KernelInfo.NumVGPR; 783 header.workitem_private_segment_byte_size = KernelInfo.ScratchSize; 784 header.workgroup_group_segment_byte_size = KernelInfo.LDSSize; 785 header.reserved_vgpr_first = KernelInfo.ReservedVGPRFirst; 786 header.reserved_vgpr_count = KernelInfo.ReservedVGPRCount; 787 788 // These alignment values are specified in powers of two, so alignment = 789 // 2^n. The minimum alignment is 2^4 = 16. 790 header.kernarg_segment_alignment = std::max((size_t)4, 791 countTrailingZeros(MFI->getMaxKernArgAlign())); 792 793 if (STM.debuggerEmitPrologue()) { 794 header.debug_wavefront_private_segment_offset_sgpr = 795 KernelInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR; 796 header.debug_private_segment_buffer_sgpr = 797 KernelInfo.DebuggerPrivateSegmentBufferSGPR; 798 } 799 800 AMDGPUTargetStreamer *TS = 801 static_cast<AMDGPUTargetStreamer *>(OutStreamer->getTargetStreamer()); 802 803 OutStreamer->SwitchSection(getObjFileLowering().getTextSection()); 804 TS->EmitAMDKernelCodeT(header); 805 } 806 807 bool AMDGPUAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, 808 unsigned AsmVariant, 809 const char *ExtraCode, raw_ostream &O) { 810 if (ExtraCode && ExtraCode[0]) { 811 if (ExtraCode[1] != 0) 812 return true; // Unknown modifier. 813 814 switch (ExtraCode[0]) { 815 default: 816 // See if this is a generic print operand 817 return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O); 818 case 'r': 819 break; 820 } 821 } 822 823 AMDGPUInstPrinter::printRegOperand(MI->getOperand(OpNo).getReg(), O, 824 *TM.getSubtargetImpl(*MF->getFunction())->getRegisterInfo()); 825 return false; 826 } 827