//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUBaseInfo.h"
#include "AMDGPU.h"
#include "AMDGPUAsmUtils.h"
#include "AMDKernelCodeT.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/AMDHSAKernelDescriptor.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetParser.h"

#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

static llvm::cl::opt<unsigned> AmdhsaCodeObjectVersion(
    "amdhsa-code-object-version", llvm::cl::Hidden,
    llvm::cl::desc("AMDHSA Code Object Version"), llvm::cl::init(4),
    llvm::cl::ZeroOrMore);

namespace {

/// \returns Bit mask for given bit \p Shift and bit \p Width.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

/// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  Dst &= ~(1 << Shift) & ~getBitMask(Shift, Width);
  Dst |= (Src << Shift) & getBitMask(Shift, Width);
  return Dst;
}

/// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}
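
// Worked example: getBitMask(4, 3) is 0b0111'0000. packBits(5, 0, 4, 3)
// yields 0b0101'0000, and unpackBits(0b0101'0000, 4, 3) recovers 5. These
// helpers implement the waitcnt and hwreg field packing further below.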

/// \returns Vmcnt bit shift (lower bits).
unsigned getVmcntBitShiftLo() { return 0; }

/// \returns Vmcnt bit width (lower bits).
unsigned getVmcntBitWidthLo() { return 4; }

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift() { return 4; }

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth() { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift() { return 8; }

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
  return (VersionMajor >= 10) ? 6 : 4;
}

/// \returns Vmcnt bit shift (higher bits).
unsigned getVmcntBitShiftHi() { return 14; }

/// \returns Vmcnt bit width (higher bits).
unsigned getVmcntBitWidthHi() { return 2; }

} // end namespace anonymous

namespace llvm {

namespace AMDGPU {

Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
  if (STI && STI->getTargetTriple().getOS() != Triple::AMDHSA)
    return None;

  switch (AmdhsaCodeObjectVersion) {
  case 2:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V2;
  case 3:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V3;
  case 4:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V4;
  case 5:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V5;
  default:
    report_fatal_error(Twine("Unsupported AMDHSA Code Object Version ") +
                       Twine(AmdhsaCodeObjectVersion));
  }
}

bool isHsaAbiVersion2(const MCSubtargetInfo *STI) {
  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
    return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V2;
  return false;
}

bool isHsaAbiVersion3(const MCSubtargetInfo *STI) {
  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
    return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V3;
  return false;
}

bool isHsaAbiVersion4(const MCSubtargetInfo *STI) {
  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
    return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V4;
  return false;
}

bool isHsaAbiVersion5(const MCSubtargetInfo *STI) {
  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
    return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V5;
  return false;
}

bool isHsaAbiVersion3AndAbove(const MCSubtargetInfo *STI) {
  return isHsaAbiVersion3(STI) || isHsaAbiVersion4(STI) ||
         isHsaAbiVersion5(STI);
}

// FIXME: All such magic numbers about the ABI should be in a
// central TD file.
unsigned getHostcallImplicitArgPosition() {
  switch (AmdhsaCodeObjectVersion) {
  case 2:
  case 3:
  case 4:
    return 24;
  case 5:
    return 80;
  default:
    llvm_unreachable("Unexpected code object version");
    return 0;
  }
}

#define GET_MIMGBaseOpcodesTable_IMPL
#define GET_MIMGDimInfoTable_IMPL
#define GET_MIMGInfoTable_IMPL
#define GET_MIMGLZMappingTable_IMPL
#define GET_MIMGMIPMappingTable_IMPL
#define GET_MIMGBiasMappingTable_IMPL
#define GET_MIMGOffsetMappingTable_IMPL
#define GET_MIMGG16MappingTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
                  unsigned VDataDwords, unsigned VAddrDwords) {
  const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding,
                                             VDataDwords, VAddrDwords);
  return Info ? Info->Opcode : -1;
}

const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
  const MIMGInfo *Info = getMIMGInfo(Opc);
  return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
}

int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
  const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
  const MIMGInfo *NewInfo =
      getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
                          NewChannels, OrigInfo->VAddrDwords);
  return NewInfo ? NewInfo->Opcode : -1;
}

unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
                           const MIMGDimInfo *Dim, bool IsA16,
                           bool IsG16Supported) {
  unsigned AddrWords = BaseOpcode->NumExtraArgs;
  unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
                            (BaseOpcode->LodOrClampOrMip ? 1 : 0);
  if (IsA16)
    AddrWords += divideCeil(AddrComponents, 2);
  else
    AddrWords += AddrComponents;

  // Note: For subtargets that support A16 but not G16, enabling A16 also
  // enables 16 bit gradients.
  // For subtargets that support A16 (operand) and G16 (done with a different
  // instruction encoding), they are independent.

  if (BaseOpcode->Gradients) {
    if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)
      // There are two gradients per coordinate, we pack them separately.
      // For the 3d case,
      // we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv)
      AddrWords += alignTo<2>(Dim->NumGradients / 2);
    else
      AddrWords += Dim->NumGradients;
  }
  return AddrWords;
}
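
// For example, using the values from the MIMG dimension tables: a 2D
// image_sample_d has 2 coordinates and 4 gradient values, so without A16 or
// G16 it needs 2 + 4 = 6 address dwords. With 16-bit gradients the gradients
// pack into alignTo<2>(4 / 2) = 2 dwords, and with A16 the coordinates also
// pack into divideCeil(2, 2) = 1 dword, for 3 address dwords in total.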

struct MUBUFInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t elements;
  bool has_vaddr;
  bool has_srsrc;
  bool has_soffset;
  bool IsBufferInv;
};

struct MTBUFInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t elements;
  bool has_vaddr;
  bool has_srsrc;
  bool has_soffset;
};

struct SMInfo {
  uint16_t Opcode;
  bool IsBuffer;
};

struct VOPInfo {
  uint16_t Opcode;
  bool IsSingle;
};

#define GET_MTBUFInfoTable_DECL
#define GET_MTBUFInfoTable_IMPL
#define GET_MUBUFInfoTable_DECL
#define GET_MUBUFInfoTable_IMPL
#define GET_SMInfoTable_DECL
#define GET_SMInfoTable_IMPL
#define GET_VOP1InfoTable_DECL
#define GET_VOP1InfoTable_IMPL
#define GET_VOP2InfoTable_DECL
#define GET_VOP2InfoTable_IMPL
#define GET_VOP3InfoTable_DECL
#define GET_VOP3InfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMTBUFBaseOpcode(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) {
  const MTBUFInfo *Info = getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;
}

int getMTBUFElements(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->elements : 0;
}

bool getMTBUFHasVAddr(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->has_vaddr : false;
}

bool getMTBUFHasSrsrc(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->has_srsrc : false;
}

bool getMTBUFHasSoffset(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->has_soffset : false;
}

int getMUBUFBaseOpcode(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) {
  const MUBUFInfo *Info = getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;
}

int getMUBUFElements(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->elements : 0;
}

bool getMUBUFHasVAddr(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_vaddr : false;
}

bool getMUBUFHasSrsrc(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_srsrc : false;
}

bool getMUBUFHasSoffset(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_soffset : false;
}

bool getMUBUFIsBufferInv(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->IsBufferInv : false;
}

bool getSMEMIsBuffer(unsigned Opc) {
  const SMInfo *Info = getSMEMOpcodeHelper(Opc);
  return Info ? Info->IsBuffer : false;
}

bool getVOP1IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP1OpcodeHelper(Opc);
  return Info ? Info->IsSingle : false;
}

bool getVOP2IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP2OpcodeHelper(Opc);
  return Info ? Info->IsSingle : false;
}

bool getVOP3IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP3OpcodeHelper(Opc);
  return Info ? Info->IsSingle : false;
}

// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}

namespace IsaInfo {

AMDGPUTargetID::AMDGPUTargetID(const MCSubtargetInfo &STI)
    : STI(STI), XnackSetting(TargetIDSetting::Any),
      SramEccSetting(TargetIDSetting::Any) {
  if (!STI.getFeatureBits().test(FeatureSupportsXNACK))
    XnackSetting = TargetIDSetting::Unsupported;
  if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC))
    SramEccSetting = TargetIDSetting::Unsupported;
}

void AMDGPUTargetID::setTargetIDFromFeaturesString(StringRef FS) {
  // Check if xnack or sramecc is explicitly enabled or disabled. In the
  // absence of the target features we assume we must generate code that can
  // run in any environment.
  SubtargetFeatures Features(FS);
  Optional<bool> XnackRequested;
  Optional<bool> SramEccRequested;

  for (const std::string &Feature : Features.getFeatures()) {
    if (Feature == "+xnack")
      XnackRequested = true;
    else if (Feature == "-xnack")
      XnackRequested = false;
    else if (Feature == "+sramecc")
      SramEccRequested = true;
    else if (Feature == "-sramecc")
      SramEccRequested = false;
  }

  bool XnackSupported = isXnackSupported();
  bool SramEccSupported = isSramEccSupported();

  if (XnackRequested) {
    if (XnackSupported) {
      XnackSetting =
          *XnackRequested ? TargetIDSetting::On : TargetIDSetting::Off;
    } else {
      // If a specific xnack setting was requested and this GPU does not
      // support xnack, emit a warning. The setting remains "Unsupported".
      if (*XnackRequested) {
        errs() << "warning: xnack 'On' was requested for a processor that does "
                  "not support it!\n";
      } else {
        errs() << "warning: xnack 'Off' was requested for a processor that "
                  "does not support it!\n";
      }
    }
  }

  if (SramEccRequested) {
    if (SramEccSupported) {
      SramEccSetting =
          *SramEccRequested ? TargetIDSetting::On : TargetIDSetting::Off;
    } else {
      // If a specific sramecc setting was requested and this GPU does not
      // support sramecc, emit a warning. The setting remains "Unsupported".
      if (*SramEccRequested) {
        errs() << "warning: sramecc 'On' was requested for a processor that "
                  "does not support it!\n";
      } else {
        errs() << "warning: sramecc 'Off' was requested for a processor that "
                  "does not support it!\n";
      }
    }
  }
}

static TargetIDSetting
getTargetIDSettingFromFeatureString(StringRef FeatureString) {
  if (FeatureString.endswith("-"))
    return TargetIDSetting::Off;
  if (FeatureString.endswith("+"))
    return TargetIDSetting::On;

  llvm_unreachable("Malformed feature string");
}

void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) {
  SmallVector<StringRef, 3> TargetIDSplit;
  TargetID.split(TargetIDSplit, ':');

  for (const auto &FeatureString : TargetIDSplit) {
    if (FeatureString.startswith("xnack"))
      XnackSetting = getTargetIDSettingFromFeatureString(FeatureString);
    if (FeatureString.startswith("sramecc"))
      SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString);
  }
}
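
// For example, the target ID stream "gfx90a:sramecc+:xnack-" sets
// SramEccSetting to On and XnackSetting to Off, while a features string that
// mentions neither xnack nor sramecc leaves both settings at Any.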

std::string AMDGPUTargetID::toString() const {
  std::string StringRep;
  raw_string_ostream StreamRep(StringRep);

  auto TargetTriple = STI.getTargetTriple();
  auto Version = getIsaVersion(STI.getCPU());

  StreamRep << TargetTriple.getArchName() << '-'
            << TargetTriple.getVendorName() << '-'
            << TargetTriple.getOSName() << '-'
            << TargetTriple.getEnvironmentName() << '-';

  std::string Processor;
  // TODO: Following else statement is present here because we used various
  // alias names for GPUs up until GFX9 (e.g. 'fiji' is same as 'gfx803').
  // Remove once all aliases are removed from GCNProcessors.td.
  if (Version.Major >= 9)
    Processor = STI.getCPU().str();
  else
    Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) +
                 Twine(Version.Stepping))
                    .str();

  std::string Features;
  if (Optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) {
    switch (*HsaAbiVersion) {
    case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
      // Code object V2 only supported specific processors and had fixed
      // settings for the XNACK.
      if (Processor == "gfx600") {
      } else if (Processor == "gfx601") {
      } else if (Processor == "gfx602") {
      } else if (Processor == "gfx700") {
      } else if (Processor == "gfx701") {
      } else if (Processor == "gfx702") {
      } else if (Processor == "gfx703") {
      } else if (Processor == "gfx704") {
      } else if (Processor == "gfx705") {
      } else if (Processor == "gfx801") {
        if (!isXnackOnOrAny())
          report_fatal_error(
              "AMD GPU code object V2 does not support processor " +
              Twine(Processor) + " without XNACK");
      } else if (Processor == "gfx802") {
      } else if (Processor == "gfx803") {
      } else if (Processor == "gfx805") {
      } else if (Processor == "gfx810") {
        if (!isXnackOnOrAny())
          report_fatal_error(
              "AMD GPU code object V2 does not support processor " +
              Twine(Processor) + " without XNACK");
      } else if (Processor == "gfx900") {
        if (isXnackOnOrAny())
          Processor = "gfx901";
      } else if (Processor == "gfx902") {
        if (isXnackOnOrAny())
          Processor = "gfx903";
      } else if (Processor == "gfx904") {
        if (isXnackOnOrAny())
          Processor = "gfx905";
      } else if (Processor == "gfx906") {
        if (isXnackOnOrAny())
          Processor = "gfx907";
      } else if (Processor == "gfx90c") {
        if (isXnackOnOrAny())
          report_fatal_error(
              "AMD GPU code object V2 does not support processor " +
              Twine(Processor) + " with XNACK being ON or ANY");
      } else {
        report_fatal_error(
            "AMD GPU code object V2 does not support processor " +
            Twine(Processor));
      }
      break;
    case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
      // xnack.
      if (isXnackOnOrAny())
        Features += "+xnack";
      // In code object v2 and v3, "sramecc" feature was spelled with a
      // hyphen ("sram-ecc").
      if (isSramEccOnOrAny())
        Features += "+sram-ecc";
      break;
    case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
    case ELF::ELFABIVERSION_AMDGPU_HSA_V5:
      // sramecc.
      if (getSramEccSetting() == TargetIDSetting::Off)
        Features += ":sramecc-";
      else if (getSramEccSetting() == TargetIDSetting::On)
        Features += ":sramecc+";
      // xnack.
      if (getXnackSetting() == TargetIDSetting::Off)
        Features += ":xnack-";
      else if (getXnackSetting() == TargetIDSetting::On)
        Features += ":xnack+";
      break;
    default:
      break;
    }
  }

  StreamRep << Processor << Features;

  StreamRep.flush();
  return StringRep;
}

unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureWavefrontSize16))
    return 16;
  if (STI->getFeatureBits().test(FeatureWavefrontSize32))
    return 32;

  return 64;
}

unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureLocalMemorySize32768))
    return 32768;
  if (STI->getFeatureBits().test(FeatureLocalMemorySize65536))
    return 65536;

  return 0;
}

unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
  // "Per CU" really means "per whatever functional block the waves of a
  // workgroup must share". For gfx10 in CU mode this is the CU, which contains
  // two SIMDs.
  if (isGFX10Plus(*STI) && STI->getFeatureBits().test(FeatureCuMode))
    return 2;
  // Pre-gfx10 a CU contains four SIMDs. For gfx10 in WGP mode the WGP contains
  // two CUs, so a total of four SIMDs.
  return 4;
}

unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
                               unsigned FlatWorkGroupSize) {
  assert(FlatWorkGroupSize != 0);
  if (STI->getTargetTriple().getArch() != Triple::amdgcn)
    return 8;
  unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
  if (N == 1)
    return 40;
  N = 40 / N;
  return std::min(N, 16u);
}

unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) {
  // FIXME: Need to take scratch memory into account.
  if (isGFX90A(*STI))
    return 8;
  if (!isGFX10Plus(*STI))
    return 10;
  return hasGFX10_3Insts(*STI) ? 16 : 20;
}

unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI,
                                   unsigned FlatWorkGroupSize) {
  return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize),
                    getEUsPerCU(STI));
}

unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  // Some subtargets allow encoding 2048, but this isn't tested or supported.
  return 1024;
}

unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
                              unsigned FlatWorkGroupSize) {
  return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI));
}
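
// Worked example: on a wave64 subtarget with FlatWorkGroupSize = 256,
// getWavesPerWorkGroup returns divideCeil(256, 64) = 4, so
// getMaxWorkGroupsPerCU returns std::min(40 / 4, 16u) = 10 workgroups.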

unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return getAddressableNumSGPRs(STI);
  if (Version.Major >= 8)
    return 16;
  return 8;
}

unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) {
  return 8;
}

unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 8)
    return 800;
  return 512;
}

unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureSGPRInitBug))
    return FIXED_NUM_SGPRS_FOR_INIT_BUG;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 106;
  if (Version.Major >= 8)
    return 102;
  return 104;
}

unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 0;

  if (WavesPerEU >= getMaxWavesPerEU(STI))
    return 0;

  unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
  return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
}

unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
                        bool Addressable) {
  assert(WavesPerEU != 0);

  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return Addressable ? AddressableNumSGPRs : 108;
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed) {
  unsigned ExtraSGPRs = 0;
  if (VCCUsed)
    ExtraSGPRs = 2;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return ExtraSGPRs;

  if (Version.Major < 8) {
    if (FlatScrUsed)
      ExtraSGPRs = 4;
  } else {
    if (XNACKUsed)
      ExtraSGPRs = 4;

    if (FlatScrUsed ||
        STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch))
      ExtraSGPRs = 6;
  }

  return ExtraSGPRs;
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed) {
  return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
                          STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
}

unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
  NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(STI));
  // SGPRBlocks is actual number of SGPR blocks minus 1.
  return NumSGPRs / getSGPREncodingGranule(STI) - 1;
}
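
// Worked example: with an encoding granule of 8, getNumSGPRBlocks(STI, 44)
// rounds 44 up to 48 and returns 48 / 8 - 1 = 5, the granulated value that
// is written into the kernel's program resource registers.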

unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
                             Optional<bool> EnableWavefrontSize32) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 8;

  bool IsWave32 = EnableWavefrontSize32 ?
      *EnableWavefrontSize32 :
      STI->getFeatureBits().test(FeatureWavefrontSize32);

  if (hasGFX10_3Insts(*STI))
    return IsWave32 ? 16 : 8;

  return IsWave32 ? 8 : 4;
}

unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
                                Optional<bool> EnableWavefrontSize32) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 8;

  bool IsWave32 = EnableWavefrontSize32 ?
      *EnableWavefrontSize32 :
      STI->getFeatureBits().test(FeatureWavefrontSize32);

  return IsWave32 ? 8 : 4;
}

unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 512;
  if (!isGFX10Plus(*STI))
    return 256;
  return STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1024 : 512;
}

unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 512;
  return 256;
}

unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU(STI))
    return 0;
  unsigned MinNumVGPRs =
      alignDown(getTotalNumVGPRs(STI) / (WavesPerEU + 1),
                getVGPRAllocGranule(STI)) + 1;
  return std::min(MinNumVGPRs, getAddressableNumVGPRs(STI));
}

unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
                                   getVGPRAllocGranule(STI));
  unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI);
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
}

unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
                          Optional<bool> EnableWavefrontSize32) {
  NumVGPRs = alignTo(std::max(1u, NumVGPRs),
                     getVGPREncodingGranule(STI, EnableWavefrontSize32));
  // VGPRBlocks is actual number of VGPR blocks minus 1.
  return NumVGPRs / getVGPREncodingGranule(STI, EnableWavefrontSize32) - 1;
}

} // end namespace IsaInfo

void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
                               const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  memset(&Header, 0, sizeof(Header));

  Header.amd_kernel_code_version_major = 1;
  Header.amd_kernel_code_version_minor = 2;
  Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
  Header.amd_machine_version_major = Version.Major;
  Header.amd_machine_version_minor = Version.Minor;
  Header.amd_machine_version_stepping = Version.Stepping;
  Header.kernel_code_entry_byte_offset = sizeof(Header);
  Header.wavefront_size = 6;

  // If the code object does not support indirect functions, then the value
  // must be 0xffffffff.
  Header.call_convention = -1;

  // These alignment values are specified in powers of two, so alignment =
  // 2^n. The minimum alignment is 2^4 = 16.
  Header.kernarg_segment_alignment = 4;
  Header.group_segment_alignment = 4;
  Header.private_segment_alignment = 4;

  if (Version.Major >= 10) {
    if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
      Header.wavefront_size = 5;
      Header.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
    }
    Header.compute_pgm_resource_registers |=
        S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
        S_00B848_MEM_ORDERED(1);
  }
}
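
// Note that amd_kernel_code_t stores wavefront_size as a log2 value: the
// default of 6 above encodes wave64, and the wave32 branch sets it to 5.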

amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(
    const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  amdhsa::kernel_descriptor_t KD;
  memset(&KD, 0, sizeof(KD));

  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
                  amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc2,
                  amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
  if (Version.Major >= 10) {
    AMDHSA_BITS_SET(KD.kernel_code_properties,
                    amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
                    STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1 : 0);
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE,
                    STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1);
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED, 1);
  }
  if (AMDGPU::isGFX90A(*STI)) {
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc3,
                    amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT,
                    STI->getFeatureBits().test(FeatureTgSplit) ? 1 : 0);
  }
  return KD;
}

bool isGroupSegment(const GlobalValue *GV) {
  return GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}

bool isGlobalSegment(const GlobalValue *GV) {
  return GV->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}

bool isReadOnlySegment(const GlobalValue *GV) {
  unsigned AS = GV->getAddressSpace();
  return AS == AMDGPUAS::CONSTANT_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
}

bool shouldEmitConstantsToTextSection(const Triple &TT) {
  return TT.getArch() == Triple::r600;
}

int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
  Attribute A = F.getFnAttribute(Name);
  int Result = Default;

  if (A.isStringAttribute()) {
    StringRef Str = A.getValueAsString();
    if (Str.getAsInteger(0, Result)) {
      LLVMContext &Ctx = F.getContext();
      Ctx.emitError("can't parse integer attribute " + Name);
    }
  }

  return Result;
}

std::pair<int, int> getIntegerPairAttribute(const Function &F,
                                            StringRef Name,
                                            std::pair<int, int> Default,
                                            bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return Default;

  LLVMContext &Ctx = F.getContext();
  std::pair<int, int> Ints = Default;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return Default;
  }
  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return Default;
    }
  }

  return Ints;
}

unsigned getVmcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = (1 << getVmcntBitWidthLo()) - 1;
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi = ((1 << getVmcntBitWidthHi()) - 1) << getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned getExpcntBitMask(const IsaVersion &Version) {
  return (1 << getExpcntBitWidth()) - 1;
}

unsigned getLgkmcntBitMask(const IsaVersion &Version) {
  return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
}

unsigned getWaitcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(), getVmcntBitWidthLo());
  unsigned Expcnt = getBitMask(getExpcntBitShift(), getExpcntBitWidth());
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(),
                                getLgkmcntBitWidth(Version.Major));
  unsigned Waitcnt = VmcntLo | Expcnt | Lgkmcnt;
  if (Version.Major < 9)
    return Waitcnt;

  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(), getVmcntBitWidthHi());
  return Waitcnt | VmcntHi;
}
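
// With the shifts and widths defined at the top of this file, the s_waitcnt
// immediate on gfx9 breaks down as:
//   vmcnt   = bits [3:0] plus high bits [15:14] (6 bits total)
//   expcnt  = bits [6:4]
//   lgkmcnt = bits [11:8], widened to [13:8] on gfx10
// For example, decodeVmcnt(gfx9, 0xC00F) below returns (0b11 << 4) | 0xF = 63.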

unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  unsigned VmcntLo =
      unpackBits(Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi =
      unpackBits(Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
  VmcntHi <<= getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(),
                    getLgkmcntBitWidth(Version.Major));
}

void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt,
                   unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}

Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
  Waitcnt Decoded;
  Decoded.VmCnt = decodeVmcnt(Version, Encoded);
  Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
  Decoded.LgkmCnt = decodeLgkmcnt(Version, Encoded);
  return Decoded;
}

unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt) {
  Waitcnt =
      packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return Waitcnt;

  Vmcnt >>= getVmcntBitWidthLo();
  return packBits(Vmcnt, Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
}

unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(),
                  getLgkmcntBitWidth(Version.Major));
}

unsigned encodeWaitcnt(const IsaVersion &Version,
                       unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}

unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
  return encodeWaitcnt(Version, Decoded.VmCnt, Decoded.ExpCnt, Decoded.LgkmCnt);
}

//===----------------------------------------------------------------------===//
// hwreg
//===----------------------------------------------------------------------===//

namespace Hwreg {

int64_t getHwregId(const StringRef Name, const MCSubtargetInfo &STI) {
  if (isGFX10(STI) && Name == "HW_REG_HW_ID") // An alias
    return ID_HW_ID1;
  for (int Id = ID_SYMBOLIC_FIRST_; Id < ID_SYMBOLIC_LAST_; ++Id) {
    if (IdSymbolic[Id] && Name == IdSymbolic[Id])
      return Id;
  }
  return ID_UNKNOWN_;
}

static unsigned getLastSymbolicHwreg(const MCSubtargetInfo &STI) {
  if (isSI(STI) || isCI(STI) || isVI(STI))
    return ID_SYMBOLIC_FIRST_GFX9_;
  else if (isGFX9(STI))
    return ID_SYMBOLIC_FIRST_GFX10_;
  else if (isGFX10(STI) && !isGFX10_BEncoding(STI))
    return ID_SYMBOLIC_FIRST_GFX1030_;
  else
    return ID_SYMBOLIC_LAST_;
}

bool isValidHwreg(int64_t Id, const MCSubtargetInfo &STI) {
  switch (Id) {
  case ID_HW_ID:
    return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI);
  case ID_HW_ID1:
  case ID_HW_ID2:
    return isGFX10Plus(STI);
  case ID_XNACK_MASK:
    return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI);
  default:
    return ID_SYMBOLIC_FIRST_ <= Id && Id < getLastSymbolicHwreg(STI) &&
           IdSymbolic[Id];
  }
}

bool isValidHwreg(int64_t Id) {
  return 0 <= Id && isUInt<ID_WIDTH_>(Id);
}

bool isValidHwregOffset(int64_t Offset) {
  return 0 <= Offset && isUInt<OFFSET_WIDTH_>(Offset);
}

bool isValidHwregWidth(int64_t Width) {
  return 0 <= (Width - 1) && isUInt<WIDTH_M1_WIDTH_>(Width - 1);
}

uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width) {
  return (Id << ID_SHIFT_) |
         (Offset << OFFSET_SHIFT_) |
         ((Width - 1) << WIDTH_M1_SHIFT_);
}

StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) {
  return isValidHwreg(Id, STI) ? IdSymbolic[Id] : "";
}

void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width) {
  Id = (Val & ID_MASK_) >> ID_SHIFT_;
  Offset = (Val & OFFSET_MASK_) >> OFFSET_SHIFT_;
  Width = ((Val & WIDTH_M1_MASK_) >> WIDTH_M1_SHIFT_) + 1;
}
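
// For example, encodeHwreg builds the s_getreg/s_setreg immediate from the
// register id, the starting bit offset, and Width - 1, and decodeHwreg
// splits it back apart. Storing Width - 1 rather than Width lets the width
// field express widths from 1 up to 32 bits.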

} // namespace Hwreg

//===----------------------------------------------------------------------===//
// exp tgt
//===----------------------------------------------------------------------===//

namespace Exp {

struct ExpTgt {
  StringLiteral Name;
  unsigned Tgt;
  unsigned MaxIndex;
};

static constexpr ExpTgt ExpTgtInfo[] = {
  {{"null"},  ET_NULL,   ET_NULL_MAX_IDX},
  {{"mrtz"},  ET_MRTZ,   ET_MRTZ_MAX_IDX},
  {{"prim"},  ET_PRIM,   ET_PRIM_MAX_IDX},
  {{"mrt"},   ET_MRT0,   ET_MRT_MAX_IDX},
  {{"pos"},   ET_POS0,   ET_POS_MAX_IDX},
  {{"param"}, ET_PARAM0, ET_PARAM_MAX_IDX},
};

bool getTgtName(unsigned Id, StringRef &Name, int &Index) {
  for (const ExpTgt &Val : ExpTgtInfo) {
    if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
      Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);
      Name = Val.Name;
      return true;
    }
  }
  return false;
}

unsigned getTgtId(const StringRef Name) {

  for (const ExpTgt &Val : ExpTgtInfo) {
    if (Val.MaxIndex == 0 && Name == Val.Name)
      return Val.Tgt;

    if (Val.MaxIndex > 0 && Name.startswith(Val.Name)) {
      StringRef Suffix = Name.drop_front(Val.Name.size());

      unsigned Id;
      if (Suffix.getAsInteger(10, Id) || Id > Val.MaxIndex)
        return ET_INVALID;

      // Disallow leading zeroes.
      if (Suffix.size() > 1 && Suffix[0] == '0')
        return ET_INVALID;

      return Val.Tgt + Id;
    }
  }
  return ET_INVALID;
}
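
// For example, getTgtId("mrt3") matches the "mrt" entry and returns
// ET_MRT0 + 3, while "mrt03" (leading zero) and an out-of-range index such
// as "mrt8" both return ET_INVALID.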

bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI) {
  return (Id != ET_POS4 && Id != ET_PRIM) || isGFX10Plus(STI);
}

} // namespace Exp

//===----------------------------------------------------------------------===//
// MTBUF Format
//===----------------------------------------------------------------------===//

namespace MTBUFFormat {

int64_t getDfmt(const StringRef Name) {
  for (int Id = DFMT_MIN; Id <= DFMT_MAX; ++Id) {
    if (Name == DfmtSymbolic[Id])
      return Id;
  }
  return DFMT_UNDEF;
}

StringRef getDfmtName(unsigned Id) {
  assert(Id <= DFMT_MAX);
  return DfmtSymbolic[Id];
}

static StringLiteral const *getNfmtLookupTable(const MCSubtargetInfo &STI) {
  if (isSI(STI) || isCI(STI))
    return NfmtSymbolicSICI;
  if (isVI(STI) || isGFX9(STI))
    return NfmtSymbolicVI;
  return NfmtSymbolicGFX10;
}

int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI) {
  auto lookupTable = getNfmtLookupTable(STI);
  for (int Id = NFMT_MIN; Id <= NFMT_MAX; ++Id) {
    if (Name == lookupTable[Id])
      return Id;
  }
  return NFMT_UNDEF;
}

StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI) {
  assert(Id <= NFMT_MAX);
  return getNfmtLookupTable(STI)[Id];
}

bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI) {
  unsigned Dfmt;
  unsigned Nfmt;
  decodeDfmtNfmt(Id, Dfmt, Nfmt);
  return isValidNfmt(Nfmt, STI);
}

bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI) {
  return !getNfmtName(Id, STI).empty();
}

int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt) {
  return (Dfmt << DFMT_SHIFT) | (Nfmt << NFMT_SHIFT);
}

void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt) {
  Dfmt = (Format >> DFMT_SHIFT) & DFMT_MASK;
  Nfmt = (Format >> NFMT_SHIFT) & NFMT_MASK;
}
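
// For example, with the 4-bit data format in the low bits and the 3-bit
// numeric format above it, encodeDfmtNfmt(DFMT_32_32_32_32, NFMT_FLOAT)
// and decodeDfmtNfmt round-trip the tbuffer format operand exactly.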

int64_t getUnifiedFormat(const StringRef Name) {
  for (int Id = UFMT_FIRST; Id <= UFMT_LAST; ++Id) {
    if (Name == UfmtSymbolic[Id])
      return Id;
  }
  return UFMT_UNDEF;
}

StringRef getUnifiedFormatName(unsigned Id) {
  return isValidUnifiedFormat(Id) ? UfmtSymbolic[Id] : "";
}

bool isValidUnifiedFormat(unsigned Id) {
  return Id <= UFMT_LAST;
}

int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt) {
  int64_t Fmt = encodeDfmtNfmt(Dfmt, Nfmt);
  for (int Id = UFMT_FIRST; Id <= UFMT_LAST; ++Id) {
    if (Fmt == DfmtNfmt2UFmt[Id])
      return Id;
  }
  return UFMT_UNDEF;
}

bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI) {
  return isGFX10Plus(STI) ? (Val <= UFMT_MAX) : (Val <= DFMT_NFMT_MAX);
}

unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI) {
  if (isGFX10Plus(STI))
    return UFMT_DEFAULT;
  return DFMT_NFMT_DEFAULT;
}

} // namespace MTBUFFormat

//===----------------------------------------------------------------------===//
// SendMsg
//===----------------------------------------------------------------------===//

namespace SendMsg {

int64_t getMsgId(const StringRef Name) {
  for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
    if (IdSymbolic[i] && Name == IdSymbolic[i])
      return i;
  }
  return ID_UNKNOWN_;
}

bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI, bool Strict) {
  if (Strict) {
    switch (MsgId) {
    case ID_SAVEWAVE:
      return isVI(STI) || isGFX9Plus(STI);
    case ID_STALL_WAVE_GEN:
    case ID_HALT_WAVES:
    case ID_ORDERED_PS_DONE:
    case ID_GS_ALLOC_REQ:
    case ID_GET_DOORBELL:
      return isGFX9Plus(STI);
    case ID_EARLY_PRIM_DEALLOC:
      return isGFX9(STI);
    case ID_GET_DDID:
      return isGFX10Plus(STI);
    default:
      return 0 <= MsgId && MsgId < ID_GAPS_LAST_ && IdSymbolic[MsgId];
    }
  } else {
    return 0 <= MsgId && isUInt<ID_WIDTH_>(MsgId);
  }
}

StringRef getMsgName(int64_t MsgId) {
  assert(0 <= MsgId && MsgId < ID_GAPS_LAST_);
  return IdSymbolic[MsgId];
}

int64_t getMsgOpId(int64_t MsgId, const StringRef Name) {
  const char* const *S = (MsgId == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
  const int F = (MsgId == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
  const int L = (MsgId == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
  for (int i = F; i < L; ++i) {
    if (Name == S[i]) {
      return i;
    }
  }
  return OP_UNKNOWN_;
}

bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI,
                  bool Strict) {
  assert(isValidMsgId(MsgId, STI, Strict));

  if (!Strict)
    return 0 <= OpId && isUInt<OP_WIDTH_>(OpId);

  switch (MsgId) {
  case ID_GS:
    return (OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_) && OpId != OP_GS_NOP;
  case ID_GS_DONE:
    return OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_;
  case ID_SYSMSG:
    return OP_SYS_FIRST_ <= OpId && OpId < OP_SYS_LAST_;
  default:
    return OpId == OP_NONE_;
  }
}

StringRef getMsgOpName(int64_t MsgId, int64_t OpId) {
  assert(msgRequiresOp(MsgId));
  return (MsgId == ID_SYSMSG) ? OpSysSymbolic[OpId] : OpGsSymbolic[OpId];
}

bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
                      const MCSubtargetInfo &STI, bool Strict) {
  assert(isValidMsgOp(MsgId, OpId, STI, Strict));

  if (!Strict)
    return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId);

  switch (MsgId) {
  case ID_GS:
    return STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_;
  case ID_GS_DONE:
    return (OpId == OP_GS_NOP) ?
        (StreamId == STREAM_ID_NONE_) :
        (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
  default:
    return StreamId == STREAM_ID_NONE_;
  }
}

bool msgRequiresOp(int64_t MsgId) {
  return MsgId == ID_GS || MsgId == ID_GS_DONE || MsgId == ID_SYSMSG;
}

bool msgSupportsStream(int64_t MsgId, int64_t OpId) {
  return (MsgId == ID_GS || MsgId == ID_GS_DONE) && OpId != OP_GS_NOP;
}

void decodeMsg(unsigned Val,
               uint16_t &MsgId,
               uint16_t &OpId,
               uint16_t &StreamId) {
  MsgId = Val & ID_MASK_;
  OpId = (Val & OP_MASK_) >> OP_SHIFT_;
  StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_;
}

uint64_t encodeMsg(uint64_t MsgId,
                   uint64_t OpId,
                   uint64_t StreamId) {
  return (MsgId << ID_SHIFT_) |
         (OpId << OP_SHIFT_) |
         (StreamId << STREAM_ID_SHIFT_);
}
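
// For example, encodeMsg(ID_GS, OP_GS_EMIT, 1) assembles the s_sendmsg
// immediate with the message id in the lowest field, the operation above
// it, and the stream id above that; decodeMsg splits an immediate back into
// the same three fields.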

} // namespace SendMsg

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

unsigned getInitialPSInputAddr(const Function &F) {
  return getIntegerAttribute(F, "InitialPSInputAddr", 0);
}

bool getHasColorExport(const Function &F) {
  // As a safe default always respond as if PS has color exports.
  return getIntegerAttribute(
             F, "amdgpu-color-export",
             F.getCallingConv() == CallingConv::AMDGPU_PS ? 1 : 0) != 0;
}

bool getHasDepthExport(const Function &F) {
  return getIntegerAttribute(F, "amdgpu-depth-export", 0) != 0;
}

bool isShader(CallingConv::ID cc) {
  switch (cc) {
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    return true;
  default:
    return false;
  }
}

bool isGraphics(CallingConv::ID cc) {
  return isShader(cc) || cc == CallingConv::AMDGPU_Gfx;
}

bool isCompute(CallingConv::ID cc) {
  return !isGraphics(cc) || cc == CallingConv::AMDGPU_CS;
}
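
// Note that AMDGPU_CS is both a shader and a compute calling convention, so
// isCompute(AMDGPU_CS) and isGraphics(AMDGPU_CS) are both true.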

bool isEntryFunctionCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_LS:
    return true;
  default:
    return false;
  }
}

bool isModuleEntryFunctionCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::AMDGPU_Gfx:
    return true;
  default:
    return isEntryFunctionCC(CC);
  }
}

bool isKernelCC(const Function *Func) {
  return AMDGPU::isModuleEntryFunctionCC(Func->getCallingConv());
}

bool hasXNACK(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureXNACK];
}

bool hasSRAMECC(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC];
}

bool hasMIMG_R128(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128] &&
         !STI.getFeatureBits()[AMDGPU::FeatureR128A16];
}

bool hasGFX10A16(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10A16];
}

bool hasG16(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureG16];
}

bool hasPackedD16(const MCSubtargetInfo &STI) {
  return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem];
}

bool isSI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
}

bool isCI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
}

bool isVI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool isGFX9(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

bool isGFX9Plus(const MCSubtargetInfo &STI) {
  return isGFX9(STI) || isGFX10Plus(STI);
}

bool isGFX10(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
}

bool isGFX10Plus(const MCSubtargetInfo &STI) { return isGFX10(STI); }

bool isGCN3Encoding(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding];
}

bool isGFX10_AEncoding(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10_AEncoding];
}

bool isGFX10_BEncoding(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding];
}

bool hasGFX10_3Insts(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10_3Insts];
}

bool isGFX90A(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts];
}

bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureArchitectedFlatScratch];
}

bool hasMAIInsts(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureMAIInsts];
}

int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR,
                         int32_t ArgNumVGPR) {
  if (has90AInsts && ArgNumAGPR)
    return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
  return std::max(ArgNumVGPR, ArgNumAGPR);
}

bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) {
  const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
  const unsigned FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0);
  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
         Reg == AMDGPU::SCC;
}

#define MAP_REG2REG \
  using namespace AMDGPU; \
  switch(Reg) { \
  default: return Reg; \
  CASE_CI_VI(FLAT_SCR) \
  CASE_CI_VI(FLAT_SCR_LO) \
  CASE_CI_VI(FLAT_SCR_HI) \
  CASE_VI_GFX9PLUS(TTMP0) \
  CASE_VI_GFX9PLUS(TTMP1) \
  CASE_VI_GFX9PLUS(TTMP2) \
  CASE_VI_GFX9PLUS(TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4) \
  CASE_VI_GFX9PLUS(TTMP5) \
  CASE_VI_GFX9PLUS(TTMP6) \
  CASE_VI_GFX9PLUS(TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8) \
  CASE_VI_GFX9PLUS(TTMP9) \
  CASE_VI_GFX9PLUS(TTMP10) \
  CASE_VI_GFX9PLUS(TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12) \
  CASE_VI_GFX9PLUS(TTMP13) \
  CASE_VI_GFX9PLUS(TTMP14) \
  CASE_VI_GFX9PLUS(TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
  CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
  CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
  CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
  CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  }

#define CASE_CI_VI(node) \
  assert(!isSI(STI)); \
  case node: return isCI(STI) ? node##_ci : node##_vi;

#define CASE_VI_GFX9PLUS(node) \
  case node: return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;

unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
  if (STI.getTargetTriple().getArch() == Triple::r600)
    return Reg;
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9PLUS

#define CASE_CI_VI(node) case node##_ci: case node##_vi: return node;
#define CASE_VI_GFX9PLUS(node) case node##_vi: case node##_gfx9plus: return node;

unsigned mc2PseudoReg(unsigned Reg) {
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9PLUS
#undef MAP_REG2REG
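
// For example, getMCReg(AMDGPU::TTMP0, STI) yields TTMP0_gfx9plus on GFX9
// and later but TTMP0_vi on VI, since the trap temporaries are encoded at
// different hardware register numbers across generations; mc2PseudoReg is
// the inverse mapping back to the generation-independent pseudo register.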

bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}

bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  switch (OpType) {
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
    return true;
  default:
    return false;
  }
}

bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}

// Avoid using MCRegisterClass::getSize, since that function will go away
// (move from MC* level to Target* level). Return size in bits.
unsigned getRegBitWidth(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::VGPR_LO16RegClassID:
  case AMDGPU::VGPR_HI16RegClassID:
  case AMDGPU::SGPR_LO16RegClassID:
  case AMDGPU::AGPR_LO16RegClassID:
    return 16;
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VRegOrLds_32RegClassID:
  case AMDGPU::AGPR_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::AV_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
  case AMDGPU::SRegOrLds_32RegClassID:
    return 32;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
  case AMDGPU::AReg_64RegClassID:
  case AMDGPU::SReg_64_XEXECRegClassID:
  case AMDGPU::VReg_64_Align2RegClassID:
  case AMDGPU::AReg_64_Align2RegClassID:
  case AMDGPU::AV_64RegClassID:
  case AMDGPU::AV_64_Align2RegClassID:
    return 64;
  case AMDGPU::SGPR_96RegClassID:
  case AMDGPU::SReg_96RegClassID:
  case AMDGPU::VReg_96RegClassID:
  case AMDGPU::AReg_96RegClassID:
  case AMDGPU::VReg_96_Align2RegClassID:
  case AMDGPU::AReg_96_Align2RegClassID:
  case AMDGPU::AV_96RegClassID:
  case AMDGPU::AV_96_Align2RegClassID:
    return 96;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
  case AMDGPU::AReg_128RegClassID:
  case AMDGPU::VReg_128_Align2RegClassID:
  case AMDGPU::AReg_128_Align2RegClassID:
  case AMDGPU::AV_128RegClassID:
  case AMDGPU::AV_128_Align2RegClassID:
    return 128;
  case AMDGPU::SGPR_160RegClassID:
  case AMDGPU::SReg_160RegClassID:
  case AMDGPU::VReg_160RegClassID:
  case AMDGPU::AReg_160RegClassID:
  case AMDGPU::VReg_160_Align2RegClassID:
  case AMDGPU::AReg_160_Align2RegClassID:
  case AMDGPU::AV_160RegClassID:
  case AMDGPU::AV_160_Align2RegClassID:
    return 160;
  case AMDGPU::SGPR_192RegClassID:
  case AMDGPU::SReg_192RegClassID:
  case AMDGPU::VReg_192RegClassID:
  case AMDGPU::AReg_192RegClassID:
  case AMDGPU::VReg_192_Align2RegClassID:
  case AMDGPU::AReg_192_Align2RegClassID:
  case AMDGPU::AV_192RegClassID:
  case AMDGPU::AV_192_Align2RegClassID:
    return 192;
  case AMDGPU::SGPR_224RegClassID:
  case AMDGPU::SReg_224RegClassID:
  case AMDGPU::VReg_224RegClassID:
  case AMDGPU::AReg_224RegClassID:
  case AMDGPU::VReg_224_Align2RegClassID:
  case AMDGPU::AReg_224_Align2RegClassID:
  case AMDGPU::AV_224RegClassID:
  case AMDGPU::AV_224_Align2RegClassID:
    return 224;
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
  case AMDGPU::AReg_256RegClassID:
  case AMDGPU::VReg_256_Align2RegClassID:
  case AMDGPU::AReg_256_Align2RegClassID:
  case AMDGPU::AV_256RegClassID:
  case AMDGPU::AV_256_Align2RegClassID:
    return 256;
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
  case AMDGPU::AReg_512RegClassID:
  case AMDGPU::VReg_512_Align2RegClassID:
  case AMDGPU::AReg_512_Align2RegClassID:
  case AMDGPU::AV_512RegClassID:
  case AMDGPU::AV_512_Align2RegClassID:
    return 512;
  case AMDGPU::SGPR_1024RegClassID:
  case AMDGPU::SReg_1024RegClassID:
  case AMDGPU::VReg_1024RegClassID:
  case AMDGPU::AReg_1024RegClassID:
  case AMDGPU::VReg_1024_Align2RegClassID:
  case AMDGPU::AReg_1024_Align2RegClassID:
  case AMDGPU::AV_1024RegClassID:
  case AMDGPU::AV_1024_Align2RegClassID:
    return 1024;
  default:
    llvm_unreachable("Unexpected register class");
  }
}

unsigned getRegBitWidth(const MCRegisterClass &RC) {
  return getRegBitWidth(RC.getID());
}

unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                           unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
}
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
  if (isInlinableIntLiteral(Literal))
    return true;

  uint64_t Val = static_cast<uint64_t>(Literal);
  return (Val == DoubleToBits(0.0)) ||
         (Val == DoubleToBits(1.0)) ||
         (Val == DoubleToBits(-1.0)) ||
         (Val == DoubleToBits(0.5)) ||
         (Val == DoubleToBits(-0.5)) ||
         (Val == DoubleToBits(2.0)) ||
         (Val == DoubleToBits(-2.0)) ||
         (Val == DoubleToBits(4.0)) ||
         (Val == DoubleToBits(-4.0)) ||
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
}

bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
  if (isInlinableIntLiteral(Literal))
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  uint32_t Val = static_cast<uint32_t>(Literal);
  return (Val == FloatToBits(0.0f)) ||
         (Val == FloatToBits(1.0f)) ||
         (Val == FloatToBits(-1.0f)) ||
         (Val == FloatToBits(0.5f)) ||
         (Val == FloatToBits(-0.5f)) ||
         (Val == FloatToBits(2.0f)) ||
         (Val == FloatToBits(-2.0f)) ||
         (Val == FloatToBits(4.0f)) ||
         (Val == FloatToBits(-4.0f)) ||
         (Val == 0x3e22f983 && HasInv2Pi);
}

bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;

  if (isInlinableIntLiteral(Literal))
    return true;

  uint16_t Val = static_cast<uint16_t>(Literal);
  return Val == 0x3C00 || // 1.0
         Val == 0xBC00 || // -1.0
         Val == 0x3800 || // 0.5
         Val == 0xB800 || // -0.5
         Val == 0x4000 || // 2.0
         Val == 0xC000 || // -2.0
         Val == 0x4400 || // 4.0
         Val == 0xC400 || // -4.0
         Val == 0x3118;   // 1/2pi
}

bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  if (isInt<16>(Literal) || isUInt<16>(Literal)) {
    int16_t Trunc = static_cast<int16_t>(Literal);
    return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi);
  }
  if (!(Literal & 0xffff))
    return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
}

bool isInlinableIntLiteralV216(int32_t Literal) {
  int16_t Lo16 = static_cast<int16_t>(Literal);
  if (isInt<16>(Literal) || isUInt<16>(Literal))
    return isInlinableIntLiteral(Lo16);

  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  if (!(Literal & 0xffff))
    return isInlinableIntLiteral(Hi16);
  return Lo16 == Hi16 && isInlinableIntLiteral(Lo16);
}
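// Worked examples (derived from the fp16 encodings listed in
// isInlinableLiteral16 above; illustrative only):
//
//   isInlinableLiteralV216(0x00003C00, /*HasInv2Pi=*/true) -> true
//     fits in 16 bits, so only the low half (0x3C00 == 1.0) is checked.
//   isInlinableLiteralV216(0x3C003C00, /*HasInv2Pi=*/true) -> true
//     both halves are 0x3C00 (1.0), so the packed literal inlines.
//   isInlinableLiteralV216(0x3C00B800, /*HasInv2Pi=*/true) -> false
//     halves 0x3C00 (1.0) and 0xB800 (-0.5) differ.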
bool isFoldableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  if (isInt<16>(Literal) || isUInt<16>(Literal))
    return true;

  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  if (!(Literal & 0xffff))
    return true;
  return Lo16 == Hi16;
}

bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_Gfx:
    // For non-compute shaders, SGPR inputs are marked with either inreg or
    // byval. Everything else is in VGPRs.
    return F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::InReg) ||
           F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
  return isGCN3Encoding(ST) || isGFX10Plus(ST);
}

static bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST) {
  return isGFX9Plus(ST);
}

bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
                                      int64_t EncodedOffset) {
  return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset)
                               : isUInt<8>(EncodedOffset);
}

bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST,
                                    int64_t EncodedOffset,
                                    bool IsBuffer) {
  return !IsBuffer &&
         hasSMRDSignedImmOffset(ST) &&
         isInt<21>(EncodedOffset);
}

static bool isDwordAligned(uint64_t ByteOffset) {
  return (ByteOffset & 3) == 0;
}

uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
                                uint64_t ByteOffset) {
  if (hasSMEMByteOffset(ST))
    return ByteOffset;

  assert(isDwordAligned(ByteOffset));
  return ByteOffset >> 2;
}

Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
                                       int64_t ByteOffset, bool IsBuffer) {
  // The signed version is always a byte offset.
  if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
    assert(hasSMEMByteOffset(ST));
    return isInt<20>(ByteOffset) ? Optional<int64_t>(ByteOffset) : None;
  }

  if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
    return None;

  int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
  return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
             ? Optional<int64_t>(EncodedOffset)
             : None;
}

Optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
                                                int64_t ByteOffset) {
  if (!isCI(ST) || !isDwordAligned(ByteOffset))
    return None;

  int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
  return isUInt<32>(EncodedOffset) ? Optional<int64_t>(EncodedOffset) : None;
}
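// Worked example (illustrative): on a target where hasSMEMByteOffset is
// false (neither GCN3 encoding nor GFX10+), a byte offset of 16 is
// dword-aligned, converts to 16 >> 2 == 4 dwords, and 4 fits isUInt<8>, so
// getSMRDEncodedOffset returns 4. A byte offset of 18 is not dword-aligned
// and yields None on such targets.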
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST, bool Signed) {
  // Address offset is 12-bit signed for GFX10, 13-bit for GFX9.
  if (AMDGPU::isGFX10(ST))
    return Signed ? 12 : 11;

  return Signed ? 13 : 12;
}

// Given Imm, split it into the values to put into the SOffset and ImmOffset
// fields in an MUBUF instruction. Return false if it is not possible (due to a
// hardware bug needing a workaround).
//
// The required alignment ensures that individual address components remain
// aligned if they are aligned to begin with. It also ensures that additional
// offsets within the given alignment can be added to the resulting ImmOffset.
bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset,
                      const GCNSubtarget *Subtarget, Align Alignment) {
  const uint32_t MaxImm = alignDown(4095, Alignment.value());
  uint32_t Overflow = 0;

  if (Imm > MaxImm) {
    if (Imm <= MaxImm + 64) {
      // Use an SOffset inline constant for 4..64
      Overflow = Imm - MaxImm;
      Imm = MaxImm;
    } else {
      // Try to keep the same value in SOffset for adjacent loads, so that
      // the corresponding register contents can be re-used.
      //
      // Load values with all low-bits (except for alignment bits) set into
      // SOffset, so that a larger range of values can be covered using
      // s_movk_i32.
      //
      // Atomic operations fail to work correctly when individual address
      // components are unaligned, even if their sum is aligned.
      uint32_t High = (Imm + Alignment.value()) & ~4095;
      uint32_t Low = (Imm + Alignment.value()) & 4095;
      Imm = Low;
      Overflow = High - Alignment.value();
    }
  }

  // There is a hardware bug in SI and CI which prevents address clamping in
  // MUBUF instructions from working correctly with SOffsets. The immediate
  // offset is unaffected.
  if (Overflow > 0 &&
      Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
    return false;

  ImmOffset = Imm;
  SOffset = Overflow;
  return true;
}
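// Worked example (illustrative): Imm == 5000 with Align(4) gives
// MaxImm == alignDown(4095, 4) == 4092. Since 5000 > 4092 + 64, the second
// branch applies: High == (5000 + 4) & ~4095 == 4096 and
// Low == (5000 + 4) & 4095 == 908, so ImmOffset == 908 and
// SOffset == 4096 - 4 == 4092; the two fields still sum to 5000.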
SIModeRegisterDefaults::SIModeRegisterDefaults(const Function &F) {
  *this = getDefaultForCallingConv(F.getCallingConv());

  StringRef IEEEAttr = F.getFnAttribute("amdgpu-ieee").getValueAsString();
  if (!IEEEAttr.empty())
    IEEE = IEEEAttr == "true";

  StringRef DX10ClampAttr
    = F.getFnAttribute("amdgpu-dx10-clamp").getValueAsString();
  if (!DX10ClampAttr.empty())
    DX10Clamp = DX10ClampAttr == "true";

  StringRef DenormF32Attr =
      F.getFnAttribute("denormal-fp-math-f32").getValueAsString();
  if (!DenormF32Attr.empty()) {
    DenormalMode DenormMode = parseDenormalFPAttribute(DenormF32Attr);
    FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE;
    FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
  }

  StringRef DenormAttr =
      F.getFnAttribute("denormal-fp-math").getValueAsString();
  if (!DenormAttr.empty()) {
    DenormalMode DenormMode = parseDenormalFPAttribute(DenormAttr);

    if (DenormF32Attr.empty()) {
      FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE;
      FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
    }

    FP64FP16InputDenormals = DenormMode.Input == DenormalMode::IEEE;
    FP64FP16OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
  }
}

namespace {

struct SourceOfDivergence {
  unsigned Intr;
};
const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);

#define GET_SourcesOfDivergence_IMPL
#define GET_Gfx9BufferFormat_IMPL
#define GET_Gfx10PlusBufferFormat_IMPL
#include "AMDGPUGenSearchableTables.inc"

} // end anonymous namespace

bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
  return lookupSourceOfDivergence(IntrID);
}

const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
                                                  uint8_t NumComponents,
                                                  uint8_t NumFormat,
                                                  const MCSubtargetInfo &STI) {
  return isGFX10Plus(STI)
             ? getGfx10PlusBufferFormatInfo(BitsPerComp, NumComponents,
                                            NumFormat)
             : getGfx9BufferFormatInfo(BitsPerComp, NumComponents, NumFormat);
}

const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format,
                                                  const MCSubtargetInfo &STI) {
  return isGFX10Plus(STI) ? getGfx10PlusBufferFormatInfo(Format)
                          : getGfx9BufferFormatInfo(Format);
}

} // namespace AMDGPU

raw_ostream &operator<<(raw_ostream &OS,
                        const AMDGPU::IsaInfo::TargetIDSetting S) {
  switch (S) {
  case (AMDGPU::IsaInfo::TargetIDSetting::Unsupported):
    OS << "Unsupported";
    break;
  case (AMDGPU::IsaInfo::TargetIDSetting::Any):
    OS << "Any";
    break;
  case (AMDGPU::IsaInfo::TargetIDSetting::Off):
    OS << "Off";
    break;
  case (AMDGPU::IsaInfo::TargetIDSetting::On):
    OS << "On";
    break;
  }
  return OS;
}

} // namespace llvm