1 //===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #include "AMDGPUBaseInfo.h" 10 #include "AMDGPU.h" 11 #include "AMDGPUAsmUtils.h" 12 #include "AMDKernelCodeT.h" 13 #include "GCNSubtarget.h" 14 #include "MCTargetDesc/AMDGPUMCTargetDesc.h" 15 #include "llvm/BinaryFormat/ELF.h" 16 #include "llvm/IR/Attributes.h" 17 #include "llvm/IR/Function.h" 18 #include "llvm/IR/GlobalValue.h" 19 #include "llvm/IR/IntrinsicsAMDGPU.h" 20 #include "llvm/IR/IntrinsicsR600.h" 21 #include "llvm/IR/LLVMContext.h" 22 #include "llvm/MC/MCSubtargetInfo.h" 23 #include "llvm/Support/AMDHSAKernelDescriptor.h" 24 #include "llvm/Support/CommandLine.h" 25 #include "llvm/Support/TargetParser.h" 26 27 #define GET_INSTRINFO_NAMED_OPS 28 #define GET_INSTRMAP_INFO 29 #include "AMDGPUGenInstrInfo.inc" 30 31 static llvm::cl::opt<unsigned> 32 AmdhsaCodeObjectVersion("amdhsa-code-object-version", llvm::cl::Hidden, 33 llvm::cl::desc("AMDHSA Code Object Version"), 34 llvm::cl::init(4)); 35 36 // TODO-GFX11: Remove this when full 16-bit codegen is implemented. 37 static llvm::cl::opt<bool> 38 LimitTo128VGPRs("amdgpu-limit-to-128-vgprs", llvm::cl::Hidden, 39 llvm::cl::desc("Never use more than 128 VGPRs")); 40 41 namespace { 42 43 /// \returns Bit mask for given bit \p Shift and bit \p Width. 44 unsigned getBitMask(unsigned Shift, unsigned Width) { 45 return ((1 << Width) - 1) << Shift; 46 } 47 48 /// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width. 49 /// 50 /// \returns Packed \p Dst. 51 unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) { 52 unsigned Mask = getBitMask(Shift, Width); 53 return ((Src << Shift) & Mask) | (Dst & ~Mask); 54 } 55 56 /// Unpacks bits from \p Src for given bit \p Shift and bit \p Width. 57 /// 58 /// \returns Unpacked bits. 59 unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) { 60 return (Src & getBitMask(Shift, Width)) >> Shift; 61 } 62 63 /// \returns Vmcnt bit shift (lower bits). 64 unsigned getVmcntBitShiftLo(unsigned VersionMajor) { 65 return VersionMajor >= 11 ? 10 : 0; 66 } 67 68 /// \returns Vmcnt bit width (lower bits). 69 unsigned getVmcntBitWidthLo(unsigned VersionMajor) { 70 return VersionMajor >= 11 ? 6 : 4; 71 } 72 73 /// \returns Expcnt bit shift. 74 unsigned getExpcntBitShift(unsigned VersionMajor) { 75 return VersionMajor >= 11 ? 0 : 4; 76 } 77 78 /// \returns Expcnt bit width. 79 unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; } 80 81 /// \returns Lgkmcnt bit shift. 82 unsigned getLgkmcntBitShift(unsigned VersionMajor) { 83 return VersionMajor >= 11 ? 4 : 8; 84 } 85 86 /// \returns Lgkmcnt bit width. 87 unsigned getLgkmcntBitWidth(unsigned VersionMajor) { 88 return VersionMajor >= 10 ? 6 : 4; 89 } 90 91 /// \returns Vmcnt bit shift (higher bits). 92 unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; } 93 94 /// \returns Vmcnt bit width (higher bits). 95 unsigned getVmcntBitWidthHi(unsigned VersionMajor) { 96 return (VersionMajor == 9 || VersionMajor == 10) ? 2 : 0; 97 } 98 99 } // end namespace anonymous 100 101 namespace llvm { 102 103 namespace AMDGPU { 104 105 Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) { 106 if (STI && STI->getTargetTriple().getOS() != Triple::AMDHSA) 107 return None; 108 109 switch (AmdhsaCodeObjectVersion) { 110 case 2: 111 return ELF::ELFABIVERSION_AMDGPU_HSA_V2; 112 case 3: 113 return ELF::ELFABIVERSION_AMDGPU_HSA_V3; 114 case 4: 115 return ELF::ELFABIVERSION_AMDGPU_HSA_V4; 116 case 5: 117 return ELF::ELFABIVERSION_AMDGPU_HSA_V5; 118 default: 119 report_fatal_error(Twine("Unsupported AMDHSA Code Object Version ") + 120 Twine(AmdhsaCodeObjectVersion)); 121 } 122 } 123 124 bool isHsaAbiVersion2(const MCSubtargetInfo *STI) { 125 if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI)) 126 return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V2; 127 return false; 128 } 129 130 bool isHsaAbiVersion3(const MCSubtargetInfo *STI) { 131 if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI)) 132 return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V3; 133 return false; 134 } 135 136 bool isHsaAbiVersion4(const MCSubtargetInfo *STI) { 137 if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI)) 138 return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V4; 139 return false; 140 } 141 142 bool isHsaAbiVersion5(const MCSubtargetInfo *STI) { 143 if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI)) 144 return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V5; 145 return false; 146 } 147 148 bool isHsaAbiVersion3AndAbove(const MCSubtargetInfo *STI) { 149 return isHsaAbiVersion3(STI) || isHsaAbiVersion4(STI) || 150 isHsaAbiVersion5(STI); 151 } 152 153 unsigned getAmdhsaCodeObjectVersion() { 154 return AmdhsaCodeObjectVersion; 155 } 156 157 unsigned getMultigridSyncArgImplicitArgPosition() { 158 switch (AmdhsaCodeObjectVersion) { 159 case 2: 160 case 3: 161 case 4: 162 return 48; 163 case 5: 164 return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET; 165 default: 166 llvm_unreachable("Unexpected code object version"); 167 return 0; 168 } 169 } 170 171 172 // FIXME: All such magic numbers about the ABI should be in a 173 // central TD file. 174 unsigned getHostcallImplicitArgPosition() { 175 switch (AmdhsaCodeObjectVersion) { 176 case 2: 177 case 3: 178 case 4: 179 return 24; 180 case 5: 181 return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET; 182 default: 183 llvm_unreachable("Unexpected code object version"); 184 return 0; 185 } 186 } 187 188 #define GET_MIMGBaseOpcodesTable_IMPL 189 #define GET_MIMGDimInfoTable_IMPL 190 #define GET_MIMGInfoTable_IMPL 191 #define GET_MIMGLZMappingTable_IMPL 192 #define GET_MIMGMIPMappingTable_IMPL 193 #define GET_MIMGBiasMappingTable_IMPL 194 #define GET_MIMGOffsetMappingTable_IMPL 195 #define GET_MIMGG16MappingTable_IMPL 196 #define GET_MAIInstInfoTable_IMPL 197 #include "AMDGPUGenSearchableTables.inc" 198 199 int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding, 200 unsigned VDataDwords, unsigned VAddrDwords) { 201 const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding, 202 VDataDwords, VAddrDwords); 203 return Info ? Info->Opcode : -1; 204 } 205 206 const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) { 207 const MIMGInfo *Info = getMIMGInfo(Opc); 208 return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr; 209 } 210 211 int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) { 212 const MIMGInfo *OrigInfo = getMIMGInfo(Opc); 213 const MIMGInfo *NewInfo = 214 getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding, 215 NewChannels, OrigInfo->VAddrDwords); 216 return NewInfo ? NewInfo->Opcode : -1; 217 } 218 219 unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, 220 const MIMGDimInfo *Dim, bool IsA16, 221 bool IsG16Supported) { 222 unsigned AddrWords = BaseOpcode->NumExtraArgs; 223 unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) + 224 (BaseOpcode->LodOrClampOrMip ? 1 : 0); 225 if (IsA16) 226 AddrWords += divideCeil(AddrComponents, 2); 227 else 228 AddrWords += AddrComponents; 229 230 // Note: For subtargets that support A16 but not G16, enabling A16 also 231 // enables 16 bit gradients. 232 // For subtargets that support A16 (operand) and G16 (done with a different 233 // instruction encoding), they are independent. 234 235 if (BaseOpcode->Gradients) { 236 if ((IsA16 && !IsG16Supported) || BaseOpcode->G16) 237 // There are two gradients per coordinate, we pack them separately. 238 // For the 3d case, 239 // we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv) 240 AddrWords += alignTo<2>(Dim->NumGradients / 2); 241 else 242 AddrWords += Dim->NumGradients; 243 } 244 return AddrWords; 245 } 246 247 struct MUBUFInfo { 248 uint16_t Opcode; 249 uint16_t BaseOpcode; 250 uint8_t elements; 251 bool has_vaddr; 252 bool has_srsrc; 253 bool has_soffset; 254 bool IsBufferInv; 255 }; 256 257 struct MTBUFInfo { 258 uint16_t Opcode; 259 uint16_t BaseOpcode; 260 uint8_t elements; 261 bool has_vaddr; 262 bool has_srsrc; 263 bool has_soffset; 264 }; 265 266 struct SMInfo { 267 uint16_t Opcode; 268 bool IsBuffer; 269 }; 270 271 struct VOPInfo { 272 uint16_t Opcode; 273 bool IsSingle; 274 }; 275 276 struct VOPC64DPPInfo { 277 uint16_t Opcode; 278 }; 279 280 #define GET_MTBUFInfoTable_DECL 281 #define GET_MTBUFInfoTable_IMPL 282 #define GET_MUBUFInfoTable_DECL 283 #define GET_MUBUFInfoTable_IMPL 284 #define GET_SMInfoTable_DECL 285 #define GET_SMInfoTable_IMPL 286 #define GET_VOP1InfoTable_DECL 287 #define GET_VOP1InfoTable_IMPL 288 #define GET_VOP2InfoTable_DECL 289 #define GET_VOP2InfoTable_IMPL 290 #define GET_VOP3InfoTable_DECL 291 #define GET_VOP3InfoTable_IMPL 292 #define GET_VOPC64DPPTable_DECL 293 #define GET_VOPC64DPPTable_IMPL 294 #define GET_VOPC64DPP8Table_DECL 295 #define GET_VOPC64DPP8Table_IMPL 296 #include "AMDGPUGenSearchableTables.inc" 297 298 int getMTBUFBaseOpcode(unsigned Opc) { 299 const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc); 300 return Info ? Info->BaseOpcode : -1; 301 } 302 303 int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) { 304 const MTBUFInfo *Info = getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements); 305 return Info ? Info->Opcode : -1; 306 } 307 308 int getMTBUFElements(unsigned Opc) { 309 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc); 310 return Info ? Info->elements : 0; 311 } 312 313 bool getMTBUFHasVAddr(unsigned Opc) { 314 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc); 315 return Info ? Info->has_vaddr : false; 316 } 317 318 bool getMTBUFHasSrsrc(unsigned Opc) { 319 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc); 320 return Info ? Info->has_srsrc : false; 321 } 322 323 bool getMTBUFHasSoffset(unsigned Opc) { 324 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc); 325 return Info ? Info->has_soffset : false; 326 } 327 328 int getMUBUFBaseOpcode(unsigned Opc) { 329 const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc); 330 return Info ? Info->BaseOpcode : -1; 331 } 332 333 int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) { 334 const MUBUFInfo *Info = getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements); 335 return Info ? Info->Opcode : -1; 336 } 337 338 int getMUBUFElements(unsigned Opc) { 339 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc); 340 return Info ? Info->elements : 0; 341 } 342 343 bool getMUBUFHasVAddr(unsigned Opc) { 344 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc); 345 return Info ? Info->has_vaddr : false; 346 } 347 348 bool getMUBUFHasSrsrc(unsigned Opc) { 349 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc); 350 return Info ? Info->has_srsrc : false; 351 } 352 353 bool getMUBUFHasSoffset(unsigned Opc) { 354 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc); 355 return Info ? Info->has_soffset : false; 356 } 357 358 bool getMUBUFIsBufferInv(unsigned Opc) { 359 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc); 360 return Info ? Info->IsBufferInv : false; 361 } 362 363 bool getSMEMIsBuffer(unsigned Opc) { 364 const SMInfo *Info = getSMEMOpcodeHelper(Opc); 365 return Info ? Info->IsBuffer : false; 366 } 367 368 bool getVOP1IsSingle(unsigned Opc) { 369 const VOPInfo *Info = getVOP1OpcodeHelper(Opc); 370 return Info ? Info->IsSingle : false; 371 } 372 373 bool getVOP2IsSingle(unsigned Opc) { 374 const VOPInfo *Info = getVOP2OpcodeHelper(Opc); 375 return Info ? Info->IsSingle : false; 376 } 377 378 bool getVOP3IsSingle(unsigned Opc) { 379 const VOPInfo *Info = getVOP3OpcodeHelper(Opc); 380 return Info ? Info->IsSingle : false; 381 } 382 383 bool isVOPC64DPP(unsigned Opc) { 384 return isVOPC64DPPOpcodeHelper(Opc) || isVOPC64DPP8OpcodeHelper(Opc); 385 } 386 387 bool getMAIIsDGEMM(unsigned Opc) { 388 const MAIInstInfo *Info = getMAIInstInfoHelper(Opc); 389 return Info ? Info->is_dgemm : false; 390 } 391 392 bool getMAIIsGFX940XDL(unsigned Opc) { 393 const MAIInstInfo *Info = getMAIInstInfoHelper(Opc); 394 return Info ? Info->is_gfx940_xdl : false; 395 } 396 397 // Wrapper for Tablegen'd function. enum Subtarget is not defined in any 398 // header files, so we need to wrap it in a function that takes unsigned 399 // instead. 400 int getMCOpcode(uint16_t Opcode, unsigned Gen) { 401 return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen)); 402 } 403 404 namespace IsaInfo { 405 406 AMDGPUTargetID::AMDGPUTargetID(const MCSubtargetInfo &STI) 407 : STI(STI), XnackSetting(TargetIDSetting::Any), 408 SramEccSetting(TargetIDSetting::Any) { 409 if (!STI.getFeatureBits().test(FeatureSupportsXNACK)) 410 XnackSetting = TargetIDSetting::Unsupported; 411 if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC)) 412 SramEccSetting = TargetIDSetting::Unsupported; 413 } 414 415 void AMDGPUTargetID::setTargetIDFromFeaturesString(StringRef FS) { 416 // Check if xnack or sramecc is explicitly enabled or disabled. In the 417 // absence of the target features we assume we must generate code that can run 418 // in any environment. 419 SubtargetFeatures Features(FS); 420 Optional<bool> XnackRequested; 421 Optional<bool> SramEccRequested; 422 423 for (const std::string &Feature : Features.getFeatures()) { 424 if (Feature == "+xnack") 425 XnackRequested = true; 426 else if (Feature == "-xnack") 427 XnackRequested = false; 428 else if (Feature == "+sramecc") 429 SramEccRequested = true; 430 else if (Feature == "-sramecc") 431 SramEccRequested = false; 432 } 433 434 bool XnackSupported = isXnackSupported(); 435 bool SramEccSupported = isSramEccSupported(); 436 437 if (XnackRequested) { 438 if (XnackSupported) { 439 XnackSetting = 440 *XnackRequested ? TargetIDSetting::On : TargetIDSetting::Off; 441 } else { 442 // If a specific xnack setting was requested and this GPU does not support 443 // xnack emit a warning. Setting will remain set to "Unsupported". 444 if (*XnackRequested) { 445 errs() << "warning: xnack 'On' was requested for a processor that does " 446 "not support it!\n"; 447 } else { 448 errs() << "warning: xnack 'Off' was requested for a processor that " 449 "does not support it!\n"; 450 } 451 } 452 } 453 454 if (SramEccRequested) { 455 if (SramEccSupported) { 456 SramEccSetting = 457 *SramEccRequested ? TargetIDSetting::On : TargetIDSetting::Off; 458 } else { 459 // If a specific sramecc setting was requested and this GPU does not 460 // support sramecc emit a warning. Setting will remain set to 461 // "Unsupported". 462 if (*SramEccRequested) { 463 errs() << "warning: sramecc 'On' was requested for a processor that " 464 "does not support it!\n"; 465 } else { 466 errs() << "warning: sramecc 'Off' was requested for a processor that " 467 "does not support it!\n"; 468 } 469 } 470 } 471 } 472 473 static TargetIDSetting 474 getTargetIDSettingFromFeatureString(StringRef FeatureString) { 475 if (FeatureString.endswith("-")) 476 return TargetIDSetting::Off; 477 if (FeatureString.endswith("+")) 478 return TargetIDSetting::On; 479 480 llvm_unreachable("Malformed feature string"); 481 } 482 483 void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) { 484 SmallVector<StringRef, 3> TargetIDSplit; 485 TargetID.split(TargetIDSplit, ':'); 486 487 for (const auto &FeatureString : TargetIDSplit) { 488 if (FeatureString.startswith("xnack")) 489 XnackSetting = getTargetIDSettingFromFeatureString(FeatureString); 490 if (FeatureString.startswith("sramecc")) 491 SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString); 492 } 493 } 494 495 std::string AMDGPUTargetID::toString() const { 496 std::string StringRep; 497 raw_string_ostream StreamRep(StringRep); 498 499 auto TargetTriple = STI.getTargetTriple(); 500 auto Version = getIsaVersion(STI.getCPU()); 501 502 StreamRep << TargetTriple.getArchName() << '-' 503 << TargetTriple.getVendorName() << '-' 504 << TargetTriple.getOSName() << '-' 505 << TargetTriple.getEnvironmentName() << '-'; 506 507 std::string Processor; 508 // TODO: Following else statement is present here because we used various 509 // alias names for GPUs up until GFX9 (e.g. 'fiji' is same as 'gfx803'). 510 // Remove once all aliases are removed from GCNProcessors.td. 511 if (Version.Major >= 9) 512 Processor = STI.getCPU().str(); 513 else 514 Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) + 515 Twine(Version.Stepping)) 516 .str(); 517 518 std::string Features; 519 if (Optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) { 520 switch (*HsaAbiVersion) { 521 case ELF::ELFABIVERSION_AMDGPU_HSA_V2: 522 // Code object V2 only supported specific processors and had fixed 523 // settings for the XNACK. 524 if (Processor == "gfx600") { 525 } else if (Processor == "gfx601") { 526 } else if (Processor == "gfx602") { 527 } else if (Processor == "gfx700") { 528 } else if (Processor == "gfx701") { 529 } else if (Processor == "gfx702") { 530 } else if (Processor == "gfx703") { 531 } else if (Processor == "gfx704") { 532 } else if (Processor == "gfx705") { 533 } else if (Processor == "gfx801") { 534 if (!isXnackOnOrAny()) 535 report_fatal_error( 536 "AMD GPU code object V2 does not support processor " + 537 Twine(Processor) + " without XNACK"); 538 } else if (Processor == "gfx802") { 539 } else if (Processor == "gfx803") { 540 } else if (Processor == "gfx805") { 541 } else if (Processor == "gfx810") { 542 if (!isXnackOnOrAny()) 543 report_fatal_error( 544 "AMD GPU code object V2 does not support processor " + 545 Twine(Processor) + " without XNACK"); 546 } else if (Processor == "gfx900") { 547 if (isXnackOnOrAny()) 548 Processor = "gfx901"; 549 } else if (Processor == "gfx902") { 550 if (isXnackOnOrAny()) 551 Processor = "gfx903"; 552 } else if (Processor == "gfx904") { 553 if (isXnackOnOrAny()) 554 Processor = "gfx905"; 555 } else if (Processor == "gfx906") { 556 if (isXnackOnOrAny()) 557 Processor = "gfx907"; 558 } else if (Processor == "gfx90c") { 559 if (isXnackOnOrAny()) 560 report_fatal_error( 561 "AMD GPU code object V2 does not support processor " + 562 Twine(Processor) + " with XNACK being ON or ANY"); 563 } else { 564 report_fatal_error( 565 "AMD GPU code object V2 does not support processor " + 566 Twine(Processor)); 567 } 568 break; 569 case ELF::ELFABIVERSION_AMDGPU_HSA_V3: 570 // xnack. 571 if (isXnackOnOrAny()) 572 Features += "+xnack"; 573 // In code object v2 and v3, "sramecc" feature was spelled with a 574 // hyphen ("sram-ecc"). 575 if (isSramEccOnOrAny()) 576 Features += "+sram-ecc"; 577 break; 578 case ELF::ELFABIVERSION_AMDGPU_HSA_V4: 579 case ELF::ELFABIVERSION_AMDGPU_HSA_V5: 580 // sramecc. 581 if (getSramEccSetting() == TargetIDSetting::Off) 582 Features += ":sramecc-"; 583 else if (getSramEccSetting() == TargetIDSetting::On) 584 Features += ":sramecc+"; 585 // xnack. 586 if (getXnackSetting() == TargetIDSetting::Off) 587 Features += ":xnack-"; 588 else if (getXnackSetting() == TargetIDSetting::On) 589 Features += ":xnack+"; 590 break; 591 default: 592 break; 593 } 594 } 595 596 StreamRep << Processor << Features; 597 598 StreamRep.flush(); 599 return StringRep; 600 } 601 602 unsigned getWavefrontSize(const MCSubtargetInfo *STI) { 603 if (STI->getFeatureBits().test(FeatureWavefrontSize16)) 604 return 16; 605 if (STI->getFeatureBits().test(FeatureWavefrontSize32)) 606 return 32; 607 608 return 64; 609 } 610 611 unsigned getLocalMemorySize(const MCSubtargetInfo *STI) { 612 if (STI->getFeatureBits().test(FeatureLocalMemorySize32768)) 613 return 32768; 614 if (STI->getFeatureBits().test(FeatureLocalMemorySize65536)) 615 return 65536; 616 617 return 0; 618 } 619 620 unsigned getEUsPerCU(const MCSubtargetInfo *STI) { 621 // "Per CU" really means "per whatever functional block the waves of a 622 // workgroup must share". For gfx10 in CU mode this is the CU, which contains 623 // two SIMDs. 624 if (isGFX10Plus(*STI) && STI->getFeatureBits().test(FeatureCuMode)) 625 return 2; 626 // Pre-gfx10 a CU contains four SIMDs. For gfx10 in WGP mode the WGP contains 627 // two CUs, so a total of four SIMDs. 628 return 4; 629 } 630 631 unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI, 632 unsigned FlatWorkGroupSize) { 633 assert(FlatWorkGroupSize != 0); 634 if (STI->getTargetTriple().getArch() != Triple::amdgcn) 635 return 8; 636 unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize); 637 if (N == 1) 638 return 40; 639 N = 40 / N; 640 return std::min(N, 16u); 641 } 642 643 unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) { 644 return 1; 645 } 646 647 unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) { 648 // FIXME: Need to take scratch memory into account. 649 if (isGFX90A(*STI)) 650 return 8; 651 if (!isGFX10Plus(*STI)) 652 return 10; 653 return hasGFX10_3Insts(*STI) ? 16 : 20; 654 } 655 656 unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI, 657 unsigned FlatWorkGroupSize) { 658 return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize), 659 getEUsPerCU(STI)); 660 } 661 662 unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) { 663 return 1; 664 } 665 666 unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) { 667 // Some subtargets allow encoding 2048, but this isn't tested or supported. 668 return 1024; 669 } 670 671 unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI, 672 unsigned FlatWorkGroupSize) { 673 return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI)); 674 } 675 676 unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) { 677 IsaVersion Version = getIsaVersion(STI->getCPU()); 678 if (Version.Major >= 10) 679 return getAddressableNumSGPRs(STI); 680 if (Version.Major >= 8) 681 return 16; 682 return 8; 683 } 684 685 unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) { 686 return 8; 687 } 688 689 unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) { 690 IsaVersion Version = getIsaVersion(STI->getCPU()); 691 if (Version.Major >= 8) 692 return 800; 693 return 512; 694 } 695 696 unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) { 697 if (STI->getFeatureBits().test(FeatureSGPRInitBug)) 698 return FIXED_NUM_SGPRS_FOR_INIT_BUG; 699 700 IsaVersion Version = getIsaVersion(STI->getCPU()); 701 if (Version.Major >= 10) 702 return 106; 703 if (Version.Major >= 8) 704 return 102; 705 return 104; 706 } 707 708 unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) { 709 assert(WavesPerEU != 0); 710 711 IsaVersion Version = getIsaVersion(STI->getCPU()); 712 if (Version.Major >= 10) 713 return 0; 714 715 if (WavesPerEU >= getMaxWavesPerEU(STI)) 716 return 0; 717 718 unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1); 719 if (STI->getFeatureBits().test(FeatureTrapHandler)) 720 MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS); 721 MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1; 722 return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI)); 723 } 724 725 unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, 726 bool Addressable) { 727 assert(WavesPerEU != 0); 728 729 unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI); 730 IsaVersion Version = getIsaVersion(STI->getCPU()); 731 if (Version.Major >= 10) 732 return Addressable ? AddressableNumSGPRs : 108; 733 if (Version.Major >= 8 && !Addressable) 734 AddressableNumSGPRs = 112; 735 unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU; 736 if (STI->getFeatureBits().test(FeatureTrapHandler)) 737 MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS); 738 MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI)); 739 return std::min(MaxNumSGPRs, AddressableNumSGPRs); 740 } 741 742 unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed, 743 bool FlatScrUsed, bool XNACKUsed) { 744 unsigned ExtraSGPRs = 0; 745 if (VCCUsed) 746 ExtraSGPRs = 2; 747 748 IsaVersion Version = getIsaVersion(STI->getCPU()); 749 if (Version.Major >= 10) 750 return ExtraSGPRs; 751 752 if (Version.Major < 8) { 753 if (FlatScrUsed) 754 ExtraSGPRs = 4; 755 } else { 756 if (XNACKUsed) 757 ExtraSGPRs = 4; 758 759 if (FlatScrUsed || 760 STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch)) 761 ExtraSGPRs = 6; 762 } 763 764 return ExtraSGPRs; 765 } 766 767 unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed, 768 bool FlatScrUsed) { 769 return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed, 770 STI->getFeatureBits().test(AMDGPU::FeatureXNACK)); 771 } 772 773 unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) { 774 NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(STI)); 775 // SGPRBlocks is actual number of SGPR blocks minus 1. 776 return NumSGPRs / getSGPREncodingGranule(STI) - 1; 777 } 778 779 unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI, 780 Optional<bool> EnableWavefrontSize32) { 781 if (STI->getFeatureBits().test(FeatureGFX90AInsts)) 782 return 8; 783 784 bool IsWave32 = EnableWavefrontSize32 ? 785 *EnableWavefrontSize32 : 786 STI->getFeatureBits().test(FeatureWavefrontSize32); 787 788 if (hasGFX10_3Insts(*STI)) 789 return IsWave32 ? 16 : 8; 790 791 return IsWave32 ? 8 : 4; 792 } 793 794 unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI, 795 Optional<bool> EnableWavefrontSize32) { 796 if (STI->getFeatureBits().test(FeatureGFX90AInsts)) 797 return 8; 798 799 bool IsWave32 = EnableWavefrontSize32 ? 800 *EnableWavefrontSize32 : 801 STI->getFeatureBits().test(FeatureWavefrontSize32); 802 803 return IsWave32 ? 8 : 4; 804 } 805 806 unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) { 807 if (STI->getFeatureBits().test(FeatureGFX90AInsts)) 808 return 512; 809 if (!isGFX10Plus(*STI)) 810 return 256; 811 return STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1024 : 512; 812 } 813 814 unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI) { 815 if (LimitTo128VGPRs.getNumOccurrences() ? LimitTo128VGPRs 816 : isGFX11Plus(*STI)) { 817 // GFX11 changes the encoding of 16-bit operands in VOP1/2/C instructions 818 // such that values 128..255 no longer mean v128..v255, they mean 819 // v0.hi..v127.hi instead. Until the compiler understands this, it is not 820 // safe to use v128..v255. 821 // TODO-GFX11: Remove this when full 16-bit codegen is implemented. 822 return 128; 823 } 824 if (STI->getFeatureBits().test(FeatureGFX90AInsts)) 825 return 512; 826 return 256; 827 } 828 829 unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) { 830 assert(WavesPerEU != 0); 831 832 if (WavesPerEU >= getMaxWavesPerEU(STI)) 833 return 0; 834 unsigned MinNumVGPRs = 835 alignDown(getTotalNumVGPRs(STI) / (WavesPerEU + 1), 836 getVGPRAllocGranule(STI)) + 1; 837 return std::min(MinNumVGPRs, getAddressableNumVGPRs(STI)); 838 } 839 840 unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) { 841 assert(WavesPerEU != 0); 842 843 unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU, 844 getVGPRAllocGranule(STI)); 845 unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI); 846 return std::min(MaxNumVGPRs, AddressableNumVGPRs); 847 } 848 849 unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, 850 Optional<bool> EnableWavefrontSize32) { 851 NumVGPRs = alignTo(std::max(1u, NumVGPRs), 852 getVGPREncodingGranule(STI, EnableWavefrontSize32)); 853 // VGPRBlocks is actual number of VGPR blocks minus 1. 854 return NumVGPRs / getVGPREncodingGranule(STI, EnableWavefrontSize32) - 1; 855 } 856 857 } // end namespace IsaInfo 858 859 void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header, 860 const MCSubtargetInfo *STI) { 861 IsaVersion Version = getIsaVersion(STI->getCPU()); 862 863 memset(&Header, 0, sizeof(Header)); 864 865 Header.amd_kernel_code_version_major = 1; 866 Header.amd_kernel_code_version_minor = 2; 867 Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU 868 Header.amd_machine_version_major = Version.Major; 869 Header.amd_machine_version_minor = Version.Minor; 870 Header.amd_machine_version_stepping = Version.Stepping; 871 Header.kernel_code_entry_byte_offset = sizeof(Header); 872 Header.wavefront_size = 6; 873 874 // If the code object does not support indirect functions, then the value must 875 // be 0xffffffff. 876 Header.call_convention = -1; 877 878 // These alignment values are specified in powers of two, so alignment = 879 // 2^n. The minimum alignment is 2^4 = 16. 880 Header.kernarg_segment_alignment = 4; 881 Header.group_segment_alignment = 4; 882 Header.private_segment_alignment = 4; 883 884 if (Version.Major >= 10) { 885 if (STI->getFeatureBits().test(FeatureWavefrontSize32)) { 886 Header.wavefront_size = 5; 887 Header.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32; 888 } 889 Header.compute_pgm_resource_registers |= 890 S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) | 891 S_00B848_MEM_ORDERED(1); 892 } 893 } 894 895 amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor( 896 const MCSubtargetInfo *STI) { 897 IsaVersion Version = getIsaVersion(STI->getCPU()); 898 899 amdhsa::kernel_descriptor_t KD; 900 memset(&KD, 0, sizeof(KD)); 901 902 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1, 903 amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64, 904 amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE); 905 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1, 906 amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1); 907 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1, 908 amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1); 909 AMDHSA_BITS_SET(KD.compute_pgm_rsrc2, 910 amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1); 911 if (Version.Major >= 10) { 912 AMDHSA_BITS_SET(KD.kernel_code_properties, 913 amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32, 914 STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1 : 0); 915 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1, 916 amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE, 917 STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1); 918 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1, 919 amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED, 1); 920 } 921 if (AMDGPU::isGFX90A(*STI)) { 922 AMDHSA_BITS_SET(KD.compute_pgm_rsrc3, 923 amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT, 924 STI->getFeatureBits().test(FeatureTgSplit) ? 1 : 0); 925 } 926 return KD; 927 } 928 929 bool isGroupSegment(const GlobalValue *GV) { 930 return GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS; 931 } 932 933 bool isGlobalSegment(const GlobalValue *GV) { 934 return GV->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS; 935 } 936 937 bool isReadOnlySegment(const GlobalValue *GV) { 938 unsigned AS = GV->getAddressSpace(); 939 return AS == AMDGPUAS::CONSTANT_ADDRESS || 940 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT; 941 } 942 943 bool shouldEmitConstantsToTextSection(const Triple &TT) { 944 return TT.getArch() == Triple::r600; 945 } 946 947 int getIntegerAttribute(const Function &F, StringRef Name, int Default) { 948 Attribute A = F.getFnAttribute(Name); 949 int Result = Default; 950 951 if (A.isStringAttribute()) { 952 StringRef Str = A.getValueAsString(); 953 if (Str.getAsInteger(0, Result)) { 954 LLVMContext &Ctx = F.getContext(); 955 Ctx.emitError("can't parse integer attribute " + Name); 956 } 957 } 958 959 return Result; 960 } 961 962 std::pair<int, int> getIntegerPairAttribute(const Function &F, 963 StringRef Name, 964 std::pair<int, int> Default, 965 bool OnlyFirstRequired) { 966 Attribute A = F.getFnAttribute(Name); 967 if (!A.isStringAttribute()) 968 return Default; 969 970 LLVMContext &Ctx = F.getContext(); 971 std::pair<int, int> Ints = Default; 972 std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(','); 973 if (Strs.first.trim().getAsInteger(0, Ints.first)) { 974 Ctx.emitError("can't parse first integer attribute " + Name); 975 return Default; 976 } 977 if (Strs.second.trim().getAsInteger(0, Ints.second)) { 978 if (!OnlyFirstRequired || !Strs.second.trim().empty()) { 979 Ctx.emitError("can't parse second integer attribute " + Name); 980 return Default; 981 } 982 } 983 984 return Ints; 985 } 986 987 unsigned getVmcntBitMask(const IsaVersion &Version) { 988 return (1 << (getVmcntBitWidthLo(Version.Major) + 989 getVmcntBitWidthHi(Version.Major))) - 990 1; 991 } 992 993 unsigned getExpcntBitMask(const IsaVersion &Version) { 994 return (1 << getExpcntBitWidth(Version.Major)) - 1; 995 } 996 997 unsigned getLgkmcntBitMask(const IsaVersion &Version) { 998 return (1 << getLgkmcntBitWidth(Version.Major)) - 1; 999 } 1000 1001 unsigned getWaitcntBitMask(const IsaVersion &Version) { 1002 unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major), 1003 getVmcntBitWidthLo(Version.Major)); 1004 unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major), 1005 getExpcntBitWidth(Version.Major)); 1006 unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major), 1007 getLgkmcntBitWidth(Version.Major)); 1008 unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major), 1009 getVmcntBitWidthHi(Version.Major)); 1010 return VmcntLo | Expcnt | Lgkmcnt | VmcntHi; 1011 } 1012 1013 unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) { 1014 unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major), 1015 getVmcntBitWidthLo(Version.Major)); 1016 unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major), 1017 getVmcntBitWidthHi(Version.Major)); 1018 return VmcntLo | VmcntHi << getVmcntBitWidthLo(Version.Major); 1019 } 1020 1021 unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) { 1022 return unpackBits(Waitcnt, getExpcntBitShift(Version.Major), 1023 getExpcntBitWidth(Version.Major)); 1024 } 1025 1026 unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) { 1027 return unpackBits(Waitcnt, getLgkmcntBitShift(Version.Major), 1028 getLgkmcntBitWidth(Version.Major)); 1029 } 1030 1031 void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, 1032 unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) { 1033 Vmcnt = decodeVmcnt(Version, Waitcnt); 1034 Expcnt = decodeExpcnt(Version, Waitcnt); 1035 Lgkmcnt = decodeLgkmcnt(Version, Waitcnt); 1036 } 1037 1038 Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) { 1039 Waitcnt Decoded; 1040 Decoded.VmCnt = decodeVmcnt(Version, Encoded); 1041 Decoded.ExpCnt = decodeExpcnt(Version, Encoded); 1042 Decoded.LgkmCnt = decodeLgkmcnt(Version, Encoded); 1043 return Decoded; 1044 } 1045 1046 unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt, 1047 unsigned Vmcnt) { 1048 Waitcnt = packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(Version.Major), 1049 getVmcntBitWidthLo(Version.Major)); 1050 return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt, 1051 getVmcntBitShiftHi(Version.Major), 1052 getVmcntBitWidthHi(Version.Major)); 1053 } 1054 1055 unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt, 1056 unsigned Expcnt) { 1057 return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major), 1058 getExpcntBitWidth(Version.Major)); 1059 } 1060 1061 unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt, 1062 unsigned Lgkmcnt) { 1063 return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major), 1064 getLgkmcntBitWidth(Version.Major)); 1065 } 1066 1067 unsigned encodeWaitcnt(const IsaVersion &Version, 1068 unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) { 1069 unsigned Waitcnt = getWaitcntBitMask(Version); 1070 Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt); 1071 Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt); 1072 Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt); 1073 return Waitcnt; 1074 } 1075 1076 unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) { 1077 return encodeWaitcnt(Version, Decoded.VmCnt, Decoded.ExpCnt, Decoded.LgkmCnt); 1078 } 1079 1080 //===----------------------------------------------------------------------===// 1081 // Custom Operands. 1082 // 1083 // A table of custom operands shall describe "primary" operand names 1084 // first followed by aliases if any. It is not required but recommended 1085 // to arrange operands so that operand encoding match operand position 1086 // in the table. This will make disassembly a bit more efficient. 1087 // Unused slots in the table shall have an empty name. 1088 // 1089 //===----------------------------------------------------------------------===// 1090 1091 template <class T> 1092 static bool isValidOpr(int Idx, const CustomOperand<T> OpInfo[], int OpInfoSize, 1093 T Context) { 1094 return 0 <= Idx && Idx < OpInfoSize && !OpInfo[Idx].Name.empty() && 1095 (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context)); 1096 } 1097 1098 template <class T> 1099 static int getOprIdx(std::function<bool(const CustomOperand<T> &)> Test, 1100 const CustomOperand<T> OpInfo[], int OpInfoSize, 1101 T Context) { 1102 int InvalidIdx = OPR_ID_UNKNOWN; 1103 for (int Idx = 0; Idx < OpInfoSize; ++Idx) { 1104 if (Test(OpInfo[Idx])) { 1105 if (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context)) 1106 return Idx; 1107 InvalidIdx = OPR_ID_UNSUPPORTED; 1108 } 1109 } 1110 return InvalidIdx; 1111 } 1112 1113 template <class T> 1114 static int getOprIdx(const StringRef Name, const CustomOperand<T> OpInfo[], 1115 int OpInfoSize, T Context) { 1116 auto Test = [=](const CustomOperand<T> &Op) { return Op.Name == Name; }; 1117 return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context); 1118 } 1119 1120 template <class T> 1121 static int getOprIdx(int Id, const CustomOperand<T> OpInfo[], int OpInfoSize, 1122 T Context, bool QuickCheck = true) { 1123 auto Test = [=](const CustomOperand<T> &Op) { 1124 return Op.Encoding == Id && !Op.Name.empty(); 1125 }; 1126 // This is an optimization that should work in most cases. 1127 // As a side effect, it may cause selection of an alias 1128 // instead of a primary operand name in case of sparse tables. 1129 if (QuickCheck && isValidOpr<T>(Id, OpInfo, OpInfoSize, Context) && 1130 OpInfo[Id].Encoding == Id) { 1131 return Id; 1132 } 1133 return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context); 1134 } 1135 1136 //===----------------------------------------------------------------------===// 1137 // Custom Operand Values 1138 //===----------------------------------------------------------------------===// 1139 1140 static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr, 1141 int Size, 1142 const MCSubtargetInfo &STI) { 1143 unsigned Enc = 0; 1144 for (int Idx = 0; Idx < Size; ++Idx) { 1145 const auto &Op = Opr[Idx]; 1146 if (Op.isSupported(STI)) 1147 Enc |= Op.encode(Op.Default); 1148 } 1149 return Enc; 1150 } 1151 1152 static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr, 1153 int Size, unsigned Code, 1154 bool &HasNonDefaultVal, 1155 const MCSubtargetInfo &STI) { 1156 unsigned UsedOprMask = 0; 1157 HasNonDefaultVal = false; 1158 for (int Idx = 0; Idx < Size; ++Idx) { 1159 const auto &Op = Opr[Idx]; 1160 if (!Op.isSupported(STI)) 1161 continue; 1162 UsedOprMask |= Op.getMask(); 1163 unsigned Val = Op.decode(Code); 1164 if (!Op.isValid(Val)) 1165 return false; 1166 HasNonDefaultVal |= (Val != Op.Default); 1167 } 1168 return (Code & ~UsedOprMask) == 0; 1169 } 1170 1171 static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size, 1172 unsigned Code, int &Idx, StringRef &Name, 1173 unsigned &Val, bool &IsDefault, 1174 const MCSubtargetInfo &STI) { 1175 while (Idx < Size) { 1176 const auto &Op = Opr[Idx++]; 1177 if (Op.isSupported(STI)) { 1178 Name = Op.Name; 1179 Val = Op.decode(Code); 1180 IsDefault = (Val == Op.Default); 1181 return true; 1182 } 1183 } 1184 1185 return false; 1186 } 1187 1188 static int encodeCustomOperandVal(const CustomOperandVal &Op, 1189 int64_t InputVal) { 1190 if (InputVal < 0 || InputVal > Op.Max) 1191 return OPR_VAL_INVALID; 1192 return Op.encode(InputVal); 1193 } 1194 1195 static int encodeCustomOperand(const CustomOperandVal *Opr, int Size, 1196 const StringRef Name, int64_t InputVal, 1197 unsigned &UsedOprMask, 1198 const MCSubtargetInfo &STI) { 1199 int InvalidId = OPR_ID_UNKNOWN; 1200 for (int Idx = 0; Idx < Size; ++Idx) { 1201 const auto &Op = Opr[Idx]; 1202 if (Op.Name == Name) { 1203 if (!Op.isSupported(STI)) { 1204 InvalidId = OPR_ID_UNSUPPORTED; 1205 continue; 1206 } 1207 auto OprMask = Op.getMask(); 1208 if (OprMask & UsedOprMask) 1209 return OPR_ID_DUPLICATE; 1210 UsedOprMask |= OprMask; 1211 return encodeCustomOperandVal(Op, InputVal); 1212 } 1213 } 1214 return InvalidId; 1215 } 1216 1217 //===----------------------------------------------------------------------===// 1218 // DepCtr 1219 //===----------------------------------------------------------------------===// 1220 1221 namespace DepCtr { 1222 1223 int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI) { 1224 static int Default = -1; 1225 if (Default == -1) 1226 Default = getDefaultCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, STI); 1227 return Default; 1228 } 1229 1230 bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal, 1231 const MCSubtargetInfo &STI) { 1232 return isSymbolicCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, Code, 1233 HasNonDefaultVal, STI); 1234 } 1235 1236 bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val, 1237 bool &IsDefault, const MCSubtargetInfo &STI) { 1238 return decodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Code, Id, Name, Val, 1239 IsDefault, STI); 1240 } 1241 1242 int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask, 1243 const MCSubtargetInfo &STI) { 1244 return encodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Name, Val, UsedOprMask, 1245 STI); 1246 } 1247 1248 } // namespace DepCtr 1249 1250 //===----------------------------------------------------------------------===// 1251 // hwreg 1252 //===----------------------------------------------------------------------===// 1253 1254 namespace Hwreg { 1255 1256 int64_t getHwregId(const StringRef Name, const MCSubtargetInfo &STI) { 1257 int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Opr, OPR_SIZE, STI); 1258 return (Idx < 0) ? Idx : Opr[Idx].Encoding; 1259 } 1260 1261 bool isValidHwreg(int64_t Id) { 1262 return 0 <= Id && isUInt<ID_WIDTH_>(Id); 1263 } 1264 1265 bool isValidHwregOffset(int64_t Offset) { 1266 return 0 <= Offset && isUInt<OFFSET_WIDTH_>(Offset); 1267 } 1268 1269 bool isValidHwregWidth(int64_t Width) { 1270 return 0 <= (Width - 1) && isUInt<WIDTH_M1_WIDTH_>(Width - 1); 1271 } 1272 1273 uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width) { 1274 return (Id << ID_SHIFT_) | 1275 (Offset << OFFSET_SHIFT_) | 1276 ((Width - 1) << WIDTH_M1_SHIFT_); 1277 } 1278 1279 StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) { 1280 int Idx = getOprIdx<const MCSubtargetInfo &>(Id, Opr, OPR_SIZE, STI); 1281 return (Idx < 0) ? "" : Opr[Idx].Name; 1282 } 1283 1284 void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width) { 1285 Id = (Val & ID_MASK_) >> ID_SHIFT_; 1286 Offset = (Val & OFFSET_MASK_) >> OFFSET_SHIFT_; 1287 Width = ((Val & WIDTH_M1_MASK_) >> WIDTH_M1_SHIFT_) + 1; 1288 } 1289 1290 } // namespace Hwreg 1291 1292 //===----------------------------------------------------------------------===// 1293 // exp tgt 1294 //===----------------------------------------------------------------------===// 1295 1296 namespace Exp { 1297 1298 struct ExpTgt { 1299 StringLiteral Name; 1300 unsigned Tgt; 1301 unsigned MaxIndex; 1302 }; 1303 1304 static constexpr ExpTgt ExpTgtInfo[] = { 1305 {{"null"}, ET_NULL, ET_NULL_MAX_IDX}, 1306 {{"mrtz"}, ET_MRTZ, ET_MRTZ_MAX_IDX}, 1307 {{"prim"}, ET_PRIM, ET_PRIM_MAX_IDX}, 1308 {{"mrt"}, ET_MRT0, ET_MRT_MAX_IDX}, 1309 {{"pos"}, ET_POS0, ET_POS_MAX_IDX}, 1310 {{"dual_src_blend"}, ET_DUAL_SRC_BLEND0, ET_DUAL_SRC_BLEND_MAX_IDX}, 1311 {{"param"}, ET_PARAM0, ET_PARAM_MAX_IDX}, 1312 }; 1313 1314 bool getTgtName(unsigned Id, StringRef &Name, int &Index) { 1315 for (const ExpTgt &Val : ExpTgtInfo) { 1316 if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) { 1317 Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt); 1318 Name = Val.Name; 1319 return true; 1320 } 1321 } 1322 return false; 1323 } 1324 1325 unsigned getTgtId(const StringRef Name) { 1326 1327 for (const ExpTgt &Val : ExpTgtInfo) { 1328 if (Val.MaxIndex == 0 && Name == Val.Name) 1329 return Val.Tgt; 1330 1331 if (Val.MaxIndex > 0 && Name.startswith(Val.Name)) { 1332 StringRef Suffix = Name.drop_front(Val.Name.size()); 1333 1334 unsigned Id; 1335 if (Suffix.getAsInteger(10, Id) || Id > Val.MaxIndex) 1336 return ET_INVALID; 1337 1338 // Disable leading zeroes 1339 if (Suffix.size() > 1 && Suffix[0] == '0') 1340 return ET_INVALID; 1341 1342 return Val.Tgt + Id; 1343 } 1344 } 1345 return ET_INVALID; 1346 } 1347 1348 bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI) { 1349 switch (Id) { 1350 case ET_NULL: 1351 return !isGFX11Plus(STI); 1352 case ET_POS4: 1353 case ET_PRIM: 1354 return isGFX10Plus(STI); 1355 case ET_DUAL_SRC_BLEND0: 1356 case ET_DUAL_SRC_BLEND1: 1357 return isGFX11Plus(STI); 1358 default: 1359 if (Id >= ET_PARAM0 && Id <= ET_PARAM31) 1360 return !isGFX11Plus(STI); 1361 return true; 1362 } 1363 } 1364 1365 } // namespace Exp 1366 1367 //===----------------------------------------------------------------------===// 1368 // MTBUF Format 1369 //===----------------------------------------------------------------------===// 1370 1371 namespace MTBUFFormat { 1372 1373 int64_t getDfmt(const StringRef Name) { 1374 for (int Id = DFMT_MIN; Id <= DFMT_MAX; ++Id) { 1375 if (Name == DfmtSymbolic[Id]) 1376 return Id; 1377 } 1378 return DFMT_UNDEF; 1379 } 1380 1381 StringRef getDfmtName(unsigned Id) { 1382 assert(Id <= DFMT_MAX); 1383 return DfmtSymbolic[Id]; 1384 } 1385 1386 static StringLiteral const *getNfmtLookupTable(const MCSubtargetInfo &STI) { 1387 if (isSI(STI) || isCI(STI)) 1388 return NfmtSymbolicSICI; 1389 if (isVI(STI) || isGFX9(STI)) 1390 return NfmtSymbolicVI; 1391 return NfmtSymbolicGFX10; 1392 } 1393 1394 int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI) { 1395 auto lookupTable = getNfmtLookupTable(STI); 1396 for (int Id = NFMT_MIN; Id <= NFMT_MAX; ++Id) { 1397 if (Name == lookupTable[Id]) 1398 return Id; 1399 } 1400 return NFMT_UNDEF; 1401 } 1402 1403 StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI) { 1404 assert(Id <= NFMT_MAX); 1405 return getNfmtLookupTable(STI)[Id]; 1406 } 1407 1408 bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI) { 1409 unsigned Dfmt; 1410 unsigned Nfmt; 1411 decodeDfmtNfmt(Id, Dfmt, Nfmt); 1412 return isValidNfmt(Nfmt, STI); 1413 } 1414 1415 bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI) { 1416 return !getNfmtName(Id, STI).empty(); 1417 } 1418 1419 int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt) { 1420 return (Dfmt << DFMT_SHIFT) | (Nfmt << NFMT_SHIFT); 1421 } 1422 1423 void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt) { 1424 Dfmt = (Format >> DFMT_SHIFT) & DFMT_MASK; 1425 Nfmt = (Format >> NFMT_SHIFT) & NFMT_MASK; 1426 } 1427 1428 int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI) { 1429 if (isGFX11Plus(STI)) { 1430 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) { 1431 if (Name == UfmtSymbolicGFX11[Id]) 1432 return Id; 1433 } 1434 } else { 1435 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) { 1436 if (Name == UfmtSymbolicGFX10[Id]) 1437 return Id; 1438 } 1439 } 1440 return UFMT_UNDEF; 1441 } 1442 1443 StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI) { 1444 if(isValidUnifiedFormat(Id, STI)) 1445 return isGFX10(STI) ? UfmtSymbolicGFX10[Id] : UfmtSymbolicGFX11[Id]; 1446 return ""; 1447 } 1448 1449 bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI) { 1450 return isGFX10(STI) ? Id <= UfmtGFX10::UFMT_LAST : Id <= UfmtGFX11::UFMT_LAST; 1451 } 1452 1453 int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt, 1454 const MCSubtargetInfo &STI) { 1455 int64_t Fmt = encodeDfmtNfmt(Dfmt, Nfmt); 1456 if (isGFX11Plus(STI)) { 1457 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) { 1458 if (Fmt == DfmtNfmt2UFmtGFX11[Id]) 1459 return Id; 1460 } 1461 } else { 1462 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) { 1463 if (Fmt == DfmtNfmt2UFmtGFX10[Id]) 1464 return Id; 1465 } 1466 } 1467 return UFMT_UNDEF; 1468 } 1469 1470 bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI) { 1471 return isGFX10Plus(STI) ? (Val <= UFMT_MAX) : (Val <= DFMT_NFMT_MAX); 1472 } 1473 1474 unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI) { 1475 if (isGFX10Plus(STI)) 1476 return UFMT_DEFAULT; 1477 return DFMT_NFMT_DEFAULT; 1478 } 1479 1480 } // namespace MTBUFFormat 1481 1482 //===----------------------------------------------------------------------===// 1483 // SendMsg 1484 //===----------------------------------------------------------------------===// 1485 1486 namespace SendMsg { 1487 1488 static uint64_t getMsgIdMask(const MCSubtargetInfo &STI) { 1489 return isGFX11Plus(STI) ? ID_MASK_GFX11Plus_ : ID_MASK_PreGFX11_; 1490 } 1491 1492 int64_t getMsgId(const StringRef Name, const MCSubtargetInfo &STI) { 1493 int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Msg, MSG_SIZE, STI); 1494 return (Idx < 0) ? Idx : Msg[Idx].Encoding; 1495 } 1496 1497 bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI) { 1498 return (MsgId & ~(getMsgIdMask(STI))) == 0; 1499 } 1500 1501 StringRef getMsgName(int64_t MsgId, const MCSubtargetInfo &STI) { 1502 int Idx = getOprIdx<const MCSubtargetInfo &>(MsgId, Msg, MSG_SIZE, STI); 1503 return (Idx < 0) ? "" : Msg[Idx].Name; 1504 } 1505 1506 int64_t getMsgOpId(int64_t MsgId, const StringRef Name) { 1507 const char* const *S = (MsgId == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic; 1508 const int F = (MsgId == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_; 1509 const int L = (MsgId == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_; 1510 for (int i = F; i < L; ++i) { 1511 if (Name == S[i]) { 1512 return i; 1513 } 1514 } 1515 return OP_UNKNOWN_; 1516 } 1517 1518 bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI, 1519 bool Strict) { 1520 assert(isValidMsgId(MsgId, STI)); 1521 1522 if (!Strict) 1523 return 0 <= OpId && isUInt<OP_WIDTH_>(OpId); 1524 1525 if (MsgId == ID_SYSMSG) 1526 return OP_SYS_FIRST_ <= OpId && OpId < OP_SYS_LAST_; 1527 if (!isGFX11Plus(STI)) { 1528 switch (MsgId) { 1529 case ID_GS_PreGFX11: 1530 return (OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_) && OpId != OP_GS_NOP; 1531 case ID_GS_DONE_PreGFX11: 1532 return OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_; 1533 } 1534 } 1535 return OpId == OP_NONE_; 1536 } 1537 1538 StringRef getMsgOpName(int64_t MsgId, int64_t OpId, 1539 const MCSubtargetInfo &STI) { 1540 assert(msgRequiresOp(MsgId, STI)); 1541 return (MsgId == ID_SYSMSG)? OpSysSymbolic[OpId] : OpGsSymbolic[OpId]; 1542 } 1543 1544 bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, 1545 const MCSubtargetInfo &STI, bool Strict) { 1546 assert(isValidMsgOp(MsgId, OpId, STI, Strict)); 1547 1548 if (!Strict) 1549 return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId); 1550 1551 if (!isGFX11Plus(STI)) { 1552 switch (MsgId) { 1553 case ID_GS_PreGFX11: 1554 return STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_; 1555 case ID_GS_DONE_PreGFX11: 1556 return (OpId == OP_GS_NOP) ? 1557 (StreamId == STREAM_ID_NONE_) : 1558 (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_); 1559 } 1560 } 1561 return StreamId == STREAM_ID_NONE_; 1562 } 1563 1564 bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI) { 1565 return MsgId == ID_SYSMSG || 1566 (!isGFX11Plus(STI) && 1567 (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11)); 1568 } 1569 1570 bool msgSupportsStream(int64_t MsgId, int64_t OpId, 1571 const MCSubtargetInfo &STI) { 1572 return !isGFX11Plus(STI) && 1573 (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11) && 1574 OpId != OP_GS_NOP; 1575 } 1576 1577 void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId, 1578 uint16_t &StreamId, const MCSubtargetInfo &STI) { 1579 MsgId = Val & getMsgIdMask(STI); 1580 if (isGFX11Plus(STI)) { 1581 OpId = 0; 1582 StreamId = 0; 1583 } else { 1584 OpId = (Val & OP_MASK_) >> OP_SHIFT_; 1585 StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_; 1586 } 1587 } 1588 1589 uint64_t encodeMsg(uint64_t MsgId, 1590 uint64_t OpId, 1591 uint64_t StreamId) { 1592 return MsgId | (OpId << OP_SHIFT_) | (StreamId << STREAM_ID_SHIFT_); 1593 } 1594 1595 } // namespace SendMsg 1596 1597 //===----------------------------------------------------------------------===// 1598 // 1599 //===----------------------------------------------------------------------===// 1600 1601 unsigned getInitialPSInputAddr(const Function &F) { 1602 return getIntegerAttribute(F, "InitialPSInputAddr", 0); 1603 } 1604 1605 bool getHasColorExport(const Function &F) { 1606 // As a safe default always respond as if PS has color exports. 1607 return getIntegerAttribute( 1608 F, "amdgpu-color-export", 1609 F.getCallingConv() == CallingConv::AMDGPU_PS ? 1 : 0) != 0; 1610 } 1611 1612 bool getHasDepthExport(const Function &F) { 1613 return getIntegerAttribute(F, "amdgpu-depth-export", 0) != 0; 1614 } 1615 1616 bool isShader(CallingConv::ID cc) { 1617 switch(cc) { 1618 case CallingConv::AMDGPU_VS: 1619 case CallingConv::AMDGPU_LS: 1620 case CallingConv::AMDGPU_HS: 1621 case CallingConv::AMDGPU_ES: 1622 case CallingConv::AMDGPU_GS: 1623 case CallingConv::AMDGPU_PS: 1624 case CallingConv::AMDGPU_CS: 1625 return true; 1626 default: 1627 return false; 1628 } 1629 } 1630 1631 bool isGraphics(CallingConv::ID cc) { 1632 return isShader(cc) || cc == CallingConv::AMDGPU_Gfx; 1633 } 1634 1635 bool isCompute(CallingConv::ID cc) { 1636 return !isGraphics(cc) || cc == CallingConv::AMDGPU_CS; 1637 } 1638 1639 bool isEntryFunctionCC(CallingConv::ID CC) { 1640 switch (CC) { 1641 case CallingConv::AMDGPU_KERNEL: 1642 case CallingConv::SPIR_KERNEL: 1643 case CallingConv::AMDGPU_VS: 1644 case CallingConv::AMDGPU_GS: 1645 case CallingConv::AMDGPU_PS: 1646 case CallingConv::AMDGPU_CS: 1647 case CallingConv::AMDGPU_ES: 1648 case CallingConv::AMDGPU_HS: 1649 case CallingConv::AMDGPU_LS: 1650 return true; 1651 default: 1652 return false; 1653 } 1654 } 1655 1656 bool isModuleEntryFunctionCC(CallingConv::ID CC) { 1657 switch (CC) { 1658 case CallingConv::AMDGPU_Gfx: 1659 return true; 1660 default: 1661 return isEntryFunctionCC(CC); 1662 } 1663 } 1664 1665 bool isKernelCC(const Function *Func) { 1666 return AMDGPU::isModuleEntryFunctionCC(Func->getCallingConv()); 1667 } 1668 1669 bool hasXNACK(const MCSubtargetInfo &STI) { 1670 return STI.getFeatureBits()[AMDGPU::FeatureXNACK]; 1671 } 1672 1673 bool hasSRAMECC(const MCSubtargetInfo &STI) { 1674 return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC]; 1675 } 1676 1677 bool hasMIMG_R128(const MCSubtargetInfo &STI) { 1678 return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128] && !STI.getFeatureBits()[AMDGPU::FeatureR128A16]; 1679 } 1680 1681 bool hasGFX10A16(const MCSubtargetInfo &STI) { 1682 return STI.getFeatureBits()[AMDGPU::FeatureGFX10A16]; 1683 } 1684 1685 bool hasG16(const MCSubtargetInfo &STI) { 1686 return STI.getFeatureBits()[AMDGPU::FeatureG16]; 1687 } 1688 1689 bool hasPackedD16(const MCSubtargetInfo &STI) { 1690 return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem] && !isCI(STI) && 1691 !isSI(STI); 1692 } 1693 1694 bool isSI(const MCSubtargetInfo &STI) { 1695 return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands]; 1696 } 1697 1698 bool isCI(const MCSubtargetInfo &STI) { 1699 return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands]; 1700 } 1701 1702 bool isVI(const MCSubtargetInfo &STI) { 1703 return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]; 1704 } 1705 1706 bool isGFX9(const MCSubtargetInfo &STI) { 1707 return STI.getFeatureBits()[AMDGPU::FeatureGFX9]; 1708 } 1709 1710 bool isGFX9_GFX10(const MCSubtargetInfo &STI) { 1711 return isGFX9(STI) || isGFX10(STI); 1712 } 1713 1714 bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI) { 1715 return isVI(STI) || isGFX9(STI) || isGFX10(STI); 1716 } 1717 1718 bool isGFX8Plus(const MCSubtargetInfo &STI) { 1719 return isVI(STI) || isGFX9Plus(STI); 1720 } 1721 1722 bool isGFX9Plus(const MCSubtargetInfo &STI) { 1723 return isGFX9(STI) || isGFX10Plus(STI); 1724 } 1725 1726 bool isGFX10(const MCSubtargetInfo &STI) { 1727 return STI.getFeatureBits()[AMDGPU::FeatureGFX10]; 1728 } 1729 1730 bool isGFX10Plus(const MCSubtargetInfo &STI) { 1731 return isGFX10(STI) || isGFX11Plus(STI); 1732 } 1733 1734 bool isGFX11(const MCSubtargetInfo &STI) { 1735 return STI.getFeatureBits()[AMDGPU::FeatureGFX11]; 1736 } 1737 1738 bool isGFX11Plus(const MCSubtargetInfo &STI) { 1739 return isGFX11(STI); 1740 } 1741 1742 bool isNotGFX11Plus(const MCSubtargetInfo &STI) { 1743 return !isGFX11Plus(STI); 1744 } 1745 1746 bool isNotGFX10Plus(const MCSubtargetInfo &STI) { 1747 return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI); 1748 } 1749 1750 bool isGFX10Before1030(const MCSubtargetInfo &STI) { 1751 return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI); 1752 } 1753 1754 bool isGCN3Encoding(const MCSubtargetInfo &STI) { 1755 return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding]; 1756 } 1757 1758 bool isGFX10_AEncoding(const MCSubtargetInfo &STI) { 1759 return STI.getFeatureBits()[AMDGPU::FeatureGFX10_AEncoding]; 1760 } 1761 1762 bool isGFX10_BEncoding(const MCSubtargetInfo &STI) { 1763 return STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding]; 1764 } 1765 1766 bool hasGFX10_3Insts(const MCSubtargetInfo &STI) { 1767 return STI.getFeatureBits()[AMDGPU::FeatureGFX10_3Insts]; 1768 } 1769 1770 bool isGFX90A(const MCSubtargetInfo &STI) { 1771 return STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts]; 1772 } 1773 1774 bool isGFX940(const MCSubtargetInfo &STI) { 1775 return STI.getFeatureBits()[AMDGPU::FeatureGFX940Insts]; 1776 } 1777 1778 bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) { 1779 return STI.getFeatureBits()[AMDGPU::FeatureArchitectedFlatScratch]; 1780 } 1781 1782 bool hasMAIInsts(const MCSubtargetInfo &STI) { 1783 return STI.getFeatureBits()[AMDGPU::FeatureMAIInsts]; 1784 } 1785 1786 int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR, 1787 int32_t ArgNumVGPR) { 1788 if (has90AInsts && ArgNumAGPR) 1789 return alignTo(ArgNumVGPR, 4) + ArgNumAGPR; 1790 return std::max(ArgNumVGPR, ArgNumAGPR); 1791 } 1792 1793 bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) { 1794 const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID); 1795 const unsigned FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0); 1796 return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) || 1797 Reg == AMDGPU::SCC; 1798 } 1799 1800 #define MAP_REG2REG \ 1801 using namespace AMDGPU; \ 1802 switch(Reg) { \ 1803 default: return Reg; \ 1804 CASE_CI_VI(FLAT_SCR) \ 1805 CASE_CI_VI(FLAT_SCR_LO) \ 1806 CASE_CI_VI(FLAT_SCR_HI) \ 1807 CASE_VI_GFX9PLUS(TTMP0) \ 1808 CASE_VI_GFX9PLUS(TTMP1) \ 1809 CASE_VI_GFX9PLUS(TTMP2) \ 1810 CASE_VI_GFX9PLUS(TTMP3) \ 1811 CASE_VI_GFX9PLUS(TTMP4) \ 1812 CASE_VI_GFX9PLUS(TTMP5) \ 1813 CASE_VI_GFX9PLUS(TTMP6) \ 1814 CASE_VI_GFX9PLUS(TTMP7) \ 1815 CASE_VI_GFX9PLUS(TTMP8) \ 1816 CASE_VI_GFX9PLUS(TTMP9) \ 1817 CASE_VI_GFX9PLUS(TTMP10) \ 1818 CASE_VI_GFX9PLUS(TTMP11) \ 1819 CASE_VI_GFX9PLUS(TTMP12) \ 1820 CASE_VI_GFX9PLUS(TTMP13) \ 1821 CASE_VI_GFX9PLUS(TTMP14) \ 1822 CASE_VI_GFX9PLUS(TTMP15) \ 1823 CASE_VI_GFX9PLUS(TTMP0_TTMP1) \ 1824 CASE_VI_GFX9PLUS(TTMP2_TTMP3) \ 1825 CASE_VI_GFX9PLUS(TTMP4_TTMP5) \ 1826 CASE_VI_GFX9PLUS(TTMP6_TTMP7) \ 1827 CASE_VI_GFX9PLUS(TTMP8_TTMP9) \ 1828 CASE_VI_GFX9PLUS(TTMP10_TTMP11) \ 1829 CASE_VI_GFX9PLUS(TTMP12_TTMP13) \ 1830 CASE_VI_GFX9PLUS(TTMP14_TTMP15) \ 1831 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \ 1832 CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \ 1833 CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \ 1834 CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \ 1835 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \ 1836 CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \ 1837 CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \ 1838 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \ 1839 CASE_GFXPRE11_GFX11PLUS(M0) \ 1840 CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \ 1841 CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL) \ 1842 } 1843 1844 #define CASE_CI_VI(node) \ 1845 assert(!isSI(STI)); \ 1846 case node: return isCI(STI) ? node##_ci : node##_vi; 1847 1848 #define CASE_VI_GFX9PLUS(node) \ 1849 case node: return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi; 1850 1851 #define CASE_GFXPRE11_GFX11PLUS(node) \ 1852 case node: return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11; 1853 1854 #define CASE_GFXPRE11_GFX11PLUS_TO(node, result) \ 1855 case node: return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11; 1856 1857 unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) { 1858 if (STI.getTargetTriple().getArch() == Triple::r600) 1859 return Reg; 1860 MAP_REG2REG 1861 } 1862 1863 #undef CASE_CI_VI 1864 #undef CASE_VI_GFX9PLUS 1865 #undef CASE_GFXPRE11_GFX11PLUS 1866 #undef CASE_GFXPRE11_GFX11PLUS_TO 1867 1868 #define CASE_CI_VI(node) case node##_ci: case node##_vi: return node; 1869 #define CASE_VI_GFX9PLUS(node) case node##_vi: case node##_gfx9plus: return node; 1870 #define CASE_GFXPRE11_GFX11PLUS(node) case node##_gfx11plus: case node##_gfxpre11: return node; 1871 #define CASE_GFXPRE11_GFX11PLUS_TO(node, result) 1872 1873 unsigned mc2PseudoReg(unsigned Reg) { 1874 MAP_REG2REG 1875 } 1876 1877 #undef CASE_CI_VI 1878 #undef CASE_VI_GFX9PLUS 1879 #undef CASE_GFXPRE11_GFX11PLUS 1880 #undef CASE_GFXPRE11_GFX11PLUS_TO 1881 #undef MAP_REG2REG 1882 1883 bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) { 1884 assert(OpNo < Desc.NumOperands); 1885 unsigned OpType = Desc.OpInfo[OpNo].OperandType; 1886 return OpType >= AMDGPU::OPERAND_SRC_FIRST && 1887 OpType <= AMDGPU::OPERAND_SRC_LAST; 1888 } 1889 1890 bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) { 1891 assert(OpNo < Desc.NumOperands); 1892 unsigned OpType = Desc.OpInfo[OpNo].OperandType; 1893 switch (OpType) { 1894 case AMDGPU::OPERAND_REG_IMM_FP32: 1895 case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED: 1896 case AMDGPU::OPERAND_REG_IMM_FP64: 1897 case AMDGPU::OPERAND_REG_IMM_FP16: 1898 case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED: 1899 case AMDGPU::OPERAND_REG_IMM_V2FP16: 1900 case AMDGPU::OPERAND_REG_IMM_V2INT16: 1901 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 1902 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 1903 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 1904 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: 1905 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: 1906 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: 1907 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: 1908 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: 1909 case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16: 1910 case AMDGPU::OPERAND_REG_IMM_V2FP32: 1911 case AMDGPU::OPERAND_REG_INLINE_C_V2FP32: 1912 case AMDGPU::OPERAND_REG_INLINE_AC_FP64: 1913 return true; 1914 default: 1915 return false; 1916 } 1917 } 1918 1919 bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) { 1920 assert(OpNo < Desc.NumOperands); 1921 unsigned OpType = Desc.OpInfo[OpNo].OperandType; 1922 return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST && 1923 OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST; 1924 } 1925 1926 // Avoid using MCRegisterClass::getSize, since that function will go away 1927 // (move from MC* level to Target* level). Return size in bits. 1928 unsigned getRegBitWidth(unsigned RCID) { 1929 switch (RCID) { 1930 case AMDGPU::VGPR_LO16RegClassID: 1931 case AMDGPU::VGPR_HI16RegClassID: 1932 case AMDGPU::SGPR_LO16RegClassID: 1933 case AMDGPU::AGPR_LO16RegClassID: 1934 return 16; 1935 case AMDGPU::SGPR_32RegClassID: 1936 case AMDGPU::VGPR_32RegClassID: 1937 case AMDGPU::VRegOrLds_32RegClassID: 1938 case AMDGPU::AGPR_32RegClassID: 1939 case AMDGPU::VS_32RegClassID: 1940 case AMDGPU::AV_32RegClassID: 1941 case AMDGPU::SReg_32RegClassID: 1942 case AMDGPU::SReg_32_XM0RegClassID: 1943 case AMDGPU::SRegOrLds_32RegClassID: 1944 return 32; 1945 case AMDGPU::SGPR_64RegClassID: 1946 case AMDGPU::VS_64RegClassID: 1947 case AMDGPU::SReg_64RegClassID: 1948 case AMDGPU::VReg_64RegClassID: 1949 case AMDGPU::AReg_64RegClassID: 1950 case AMDGPU::SReg_64_XEXECRegClassID: 1951 case AMDGPU::VReg_64_Align2RegClassID: 1952 case AMDGPU::AReg_64_Align2RegClassID: 1953 case AMDGPU::AV_64RegClassID: 1954 case AMDGPU::AV_64_Align2RegClassID: 1955 return 64; 1956 case AMDGPU::SGPR_96RegClassID: 1957 case AMDGPU::SReg_96RegClassID: 1958 case AMDGPU::VReg_96RegClassID: 1959 case AMDGPU::AReg_96RegClassID: 1960 case AMDGPU::VReg_96_Align2RegClassID: 1961 case AMDGPU::AReg_96_Align2RegClassID: 1962 case AMDGPU::AV_96RegClassID: 1963 case AMDGPU::AV_96_Align2RegClassID: 1964 return 96; 1965 case AMDGPU::SGPR_128RegClassID: 1966 case AMDGPU::SReg_128RegClassID: 1967 case AMDGPU::VReg_128RegClassID: 1968 case AMDGPU::AReg_128RegClassID: 1969 case AMDGPU::VReg_128_Align2RegClassID: 1970 case AMDGPU::AReg_128_Align2RegClassID: 1971 case AMDGPU::AV_128RegClassID: 1972 case AMDGPU::AV_128_Align2RegClassID: 1973 return 128; 1974 case AMDGPU::SGPR_160RegClassID: 1975 case AMDGPU::SReg_160RegClassID: 1976 case AMDGPU::VReg_160RegClassID: 1977 case AMDGPU::AReg_160RegClassID: 1978 case AMDGPU::VReg_160_Align2RegClassID: 1979 case AMDGPU::AReg_160_Align2RegClassID: 1980 case AMDGPU::AV_160RegClassID: 1981 case AMDGPU::AV_160_Align2RegClassID: 1982 return 160; 1983 case AMDGPU::SGPR_192RegClassID: 1984 case AMDGPU::SReg_192RegClassID: 1985 case AMDGPU::VReg_192RegClassID: 1986 case AMDGPU::AReg_192RegClassID: 1987 case AMDGPU::VReg_192_Align2RegClassID: 1988 case AMDGPU::AReg_192_Align2RegClassID: 1989 case AMDGPU::AV_192RegClassID: 1990 case AMDGPU::AV_192_Align2RegClassID: 1991 return 192; 1992 case AMDGPU::SGPR_224RegClassID: 1993 case AMDGPU::SReg_224RegClassID: 1994 case AMDGPU::VReg_224RegClassID: 1995 case AMDGPU::AReg_224RegClassID: 1996 case AMDGPU::VReg_224_Align2RegClassID: 1997 case AMDGPU::AReg_224_Align2RegClassID: 1998 case AMDGPU::AV_224RegClassID: 1999 case AMDGPU::AV_224_Align2RegClassID: 2000 return 224; 2001 case AMDGPU::SGPR_256RegClassID: 2002 case AMDGPU::SReg_256RegClassID: 2003 case AMDGPU::VReg_256RegClassID: 2004 case AMDGPU::AReg_256RegClassID: 2005 case AMDGPU::VReg_256_Align2RegClassID: 2006 case AMDGPU::AReg_256_Align2RegClassID: 2007 case AMDGPU::AV_256RegClassID: 2008 case AMDGPU::AV_256_Align2RegClassID: 2009 return 256; 2010 case AMDGPU::SGPR_512RegClassID: 2011 case AMDGPU::SReg_512RegClassID: 2012 case AMDGPU::VReg_512RegClassID: 2013 case AMDGPU::AReg_512RegClassID: 2014 case AMDGPU::VReg_512_Align2RegClassID: 2015 case AMDGPU::AReg_512_Align2RegClassID: 2016 case AMDGPU::AV_512RegClassID: 2017 case AMDGPU::AV_512_Align2RegClassID: 2018 return 512; 2019 case AMDGPU::SGPR_1024RegClassID: 2020 case AMDGPU::SReg_1024RegClassID: 2021 case AMDGPU::VReg_1024RegClassID: 2022 case AMDGPU::AReg_1024RegClassID: 2023 case AMDGPU::VReg_1024_Align2RegClassID: 2024 case AMDGPU::AReg_1024_Align2RegClassID: 2025 case AMDGPU::AV_1024RegClassID: 2026 case AMDGPU::AV_1024_Align2RegClassID: 2027 return 1024; 2028 default: 2029 llvm_unreachable("Unexpected register class"); 2030 } 2031 } 2032 2033 unsigned getRegBitWidth(const MCRegisterClass &RC) { 2034 return getRegBitWidth(RC.getID()); 2035 } 2036 2037 unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc, 2038 unsigned OpNo) { 2039 assert(OpNo < Desc.NumOperands); 2040 unsigned RCID = Desc.OpInfo[OpNo].RegClass; 2041 return getRegBitWidth(MRI->getRegClass(RCID)) / 8; 2042 } 2043 2044 bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) { 2045 if (isInlinableIntLiteral(Literal)) 2046 return true; 2047 2048 uint64_t Val = static_cast<uint64_t>(Literal); 2049 return (Val == DoubleToBits(0.0)) || 2050 (Val == DoubleToBits(1.0)) || 2051 (Val == DoubleToBits(-1.0)) || 2052 (Val == DoubleToBits(0.5)) || 2053 (Val == DoubleToBits(-0.5)) || 2054 (Val == DoubleToBits(2.0)) || 2055 (Val == DoubleToBits(-2.0)) || 2056 (Val == DoubleToBits(4.0)) || 2057 (Val == DoubleToBits(-4.0)) || 2058 (Val == 0x3fc45f306dc9c882 && HasInv2Pi); 2059 } 2060 2061 bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) { 2062 if (isInlinableIntLiteral(Literal)) 2063 return true; 2064 2065 // The actual type of the operand does not seem to matter as long 2066 // as the bits match one of the inline immediate values. For example: 2067 // 2068 // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal, 2069 // so it is a legal inline immediate. 2070 // 2071 // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in 2072 // floating-point, so it is a legal inline immediate. 2073 2074 uint32_t Val = static_cast<uint32_t>(Literal); 2075 return (Val == FloatToBits(0.0f)) || 2076 (Val == FloatToBits(1.0f)) || 2077 (Val == FloatToBits(-1.0f)) || 2078 (Val == FloatToBits(0.5f)) || 2079 (Val == FloatToBits(-0.5f)) || 2080 (Val == FloatToBits(2.0f)) || 2081 (Val == FloatToBits(-2.0f)) || 2082 (Val == FloatToBits(4.0f)) || 2083 (Val == FloatToBits(-4.0f)) || 2084 (Val == 0x3e22f983 && HasInv2Pi); 2085 } 2086 2087 bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) { 2088 if (!HasInv2Pi) 2089 return false; 2090 2091 if (isInlinableIntLiteral(Literal)) 2092 return true; 2093 2094 uint16_t Val = static_cast<uint16_t>(Literal); 2095 return Val == 0x3C00 || // 1.0 2096 Val == 0xBC00 || // -1.0 2097 Val == 0x3800 || // 0.5 2098 Val == 0xB800 || // -0.5 2099 Val == 0x4000 || // 2.0 2100 Val == 0xC000 || // -2.0 2101 Val == 0x4400 || // 4.0 2102 Val == 0xC400 || // -4.0 2103 Val == 0x3118; // 1/2pi 2104 } 2105 2106 bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) { 2107 assert(HasInv2Pi); 2108 2109 if (isInt<16>(Literal) || isUInt<16>(Literal)) { 2110 int16_t Trunc = static_cast<int16_t>(Literal); 2111 return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi); 2112 } 2113 if (!(Literal & 0xffff)) 2114 return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi); 2115 2116 int16_t Lo16 = static_cast<int16_t>(Literal); 2117 int16_t Hi16 = static_cast<int16_t>(Literal >> 16); 2118 return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi); 2119 } 2120 2121 bool isInlinableIntLiteralV216(int32_t Literal) { 2122 int16_t Lo16 = static_cast<int16_t>(Literal); 2123 if (isInt<16>(Literal) || isUInt<16>(Literal)) 2124 return isInlinableIntLiteral(Lo16); 2125 2126 int16_t Hi16 = static_cast<int16_t>(Literal >> 16); 2127 if (!(Literal & 0xffff)) 2128 return isInlinableIntLiteral(Hi16); 2129 return Lo16 == Hi16 && isInlinableIntLiteral(Lo16); 2130 } 2131 2132 bool isFoldableLiteralV216(int32_t Literal, bool HasInv2Pi) { 2133 assert(HasInv2Pi); 2134 2135 int16_t Lo16 = static_cast<int16_t>(Literal); 2136 if (isInt<16>(Literal) || isUInt<16>(Literal)) 2137 return true; 2138 2139 int16_t Hi16 = static_cast<int16_t>(Literal >> 16); 2140 if (!(Literal & 0xffff)) 2141 return true; 2142 return Lo16 == Hi16; 2143 } 2144 2145 bool isArgPassedInSGPR(const Argument *A) { 2146 const Function *F = A->getParent(); 2147 2148 // Arguments to compute shaders are never a source of divergence. 2149 CallingConv::ID CC = F->getCallingConv(); 2150 switch (CC) { 2151 case CallingConv::AMDGPU_KERNEL: 2152 case CallingConv::SPIR_KERNEL: 2153 return true; 2154 case CallingConv::AMDGPU_VS: 2155 case CallingConv::AMDGPU_LS: 2156 case CallingConv::AMDGPU_HS: 2157 case CallingConv::AMDGPU_ES: 2158 case CallingConv::AMDGPU_GS: 2159 case CallingConv::AMDGPU_PS: 2160 case CallingConv::AMDGPU_CS: 2161 case CallingConv::AMDGPU_Gfx: 2162 // For non-compute shaders, SGPR inputs are marked with either inreg or byval. 2163 // Everything else is in VGPRs. 2164 return F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::InReg) || 2165 F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::ByVal); 2166 default: 2167 // TODO: Should calls support inreg for SGPR inputs? 2168 return false; 2169 } 2170 } 2171 2172 static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) { 2173 return isGCN3Encoding(ST) || isGFX10Plus(ST); 2174 } 2175 2176 static bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST) { 2177 return isGFX9Plus(ST); 2178 } 2179 2180 bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST, 2181 int64_t EncodedOffset) { 2182 return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset) 2183 : isUInt<8>(EncodedOffset); 2184 } 2185 2186 bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST, 2187 int64_t EncodedOffset, 2188 bool IsBuffer) { 2189 return !IsBuffer && 2190 hasSMRDSignedImmOffset(ST) && 2191 isInt<21>(EncodedOffset); 2192 } 2193 2194 static bool isDwordAligned(uint64_t ByteOffset) { 2195 return (ByteOffset & 3) == 0; 2196 } 2197 2198 uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST, 2199 uint64_t ByteOffset) { 2200 if (hasSMEMByteOffset(ST)) 2201 return ByteOffset; 2202 2203 assert(isDwordAligned(ByteOffset)); 2204 return ByteOffset >> 2; 2205 } 2206 2207 Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST, 2208 int64_t ByteOffset, bool IsBuffer) { 2209 // The signed version is always a byte offset. 2210 if (!IsBuffer && hasSMRDSignedImmOffset(ST)) { 2211 assert(hasSMEMByteOffset(ST)); 2212 return isInt<20>(ByteOffset) ? Optional<int64_t>(ByteOffset) : None; 2213 } 2214 2215 if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST)) 2216 return None; 2217 2218 int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset); 2219 return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset) 2220 ? Optional<int64_t>(EncodedOffset) 2221 : None; 2222 } 2223 2224 Optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST, 2225 int64_t ByteOffset) { 2226 if (!isCI(ST) || !isDwordAligned(ByteOffset)) 2227 return None; 2228 2229 int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset); 2230 return isUInt<32>(EncodedOffset) ? Optional<int64_t>(EncodedOffset) : None; 2231 } 2232 2233 unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST, bool Signed) { 2234 // Address offset is 12-bit signed for GFX10, 13-bit for GFX9 and GFX11+. 2235 if (AMDGPU::isGFX10(ST)) 2236 return Signed ? 12 : 11; 2237 2238 return Signed ? 13 : 12; 2239 } 2240 2241 // Given Imm, split it into the values to put into the SOffset and ImmOffset 2242 // fields in an MUBUF instruction. Return false if it is not possible (due to a 2243 // hardware bug needing a workaround). 2244 // 2245 // The required alignment ensures that individual address components remain 2246 // aligned if they are aligned to begin with. It also ensures that additional 2247 // offsets within the given alignment can be added to the resulting ImmOffset. 2248 bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset, 2249 const GCNSubtarget *Subtarget, Align Alignment) { 2250 const uint32_t MaxImm = alignDown(4095, Alignment.value()); 2251 uint32_t Overflow = 0; 2252 2253 if (Imm > MaxImm) { 2254 if (Imm <= MaxImm + 64) { 2255 // Use an SOffset inline constant for 4..64 2256 Overflow = Imm - MaxImm; 2257 Imm = MaxImm; 2258 } else { 2259 // Try to keep the same value in SOffset for adjacent loads, so that 2260 // the corresponding register contents can be re-used. 2261 // 2262 // Load values with all low-bits (except for alignment bits) set into 2263 // SOffset, so that a larger range of values can be covered using 2264 // s_movk_i32. 2265 // 2266 // Atomic operations fail to work correctly when individual address 2267 // components are unaligned, even if their sum is aligned. 2268 uint32_t High = (Imm + Alignment.value()) & ~4095; 2269 uint32_t Low = (Imm + Alignment.value()) & 4095; 2270 Imm = Low; 2271 Overflow = High - Alignment.value(); 2272 } 2273 } 2274 2275 // There is a hardware bug in SI and CI which prevents address clamping in 2276 // MUBUF instructions from working correctly with SOffsets. The immediate 2277 // offset is unaffected. 2278 if (Overflow > 0 && 2279 Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS) 2280 return false; 2281 2282 ImmOffset = Imm; 2283 SOffset = Overflow; 2284 return true; 2285 } 2286 2287 SIModeRegisterDefaults::SIModeRegisterDefaults(const Function &F) { 2288 *this = getDefaultForCallingConv(F.getCallingConv()); 2289 2290 StringRef IEEEAttr = F.getFnAttribute("amdgpu-ieee").getValueAsString(); 2291 if (!IEEEAttr.empty()) 2292 IEEE = IEEEAttr == "true"; 2293 2294 StringRef DX10ClampAttr 2295 = F.getFnAttribute("amdgpu-dx10-clamp").getValueAsString(); 2296 if (!DX10ClampAttr.empty()) 2297 DX10Clamp = DX10ClampAttr == "true"; 2298 2299 StringRef DenormF32Attr = F.getFnAttribute("denormal-fp-math-f32").getValueAsString(); 2300 if (!DenormF32Attr.empty()) { 2301 DenormalMode DenormMode = parseDenormalFPAttribute(DenormF32Attr); 2302 FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE; 2303 FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE; 2304 } 2305 2306 StringRef DenormAttr = F.getFnAttribute("denormal-fp-math").getValueAsString(); 2307 if (!DenormAttr.empty()) { 2308 DenormalMode DenormMode = parseDenormalFPAttribute(DenormAttr); 2309 2310 if (DenormF32Attr.empty()) { 2311 FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE; 2312 FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE; 2313 } 2314 2315 FP64FP16InputDenormals = DenormMode.Input == DenormalMode::IEEE; 2316 FP64FP16OutputDenormals = DenormMode.Output == DenormalMode::IEEE; 2317 } 2318 } 2319 2320 namespace { 2321 2322 struct SourceOfDivergence { 2323 unsigned Intr; 2324 }; 2325 const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr); 2326 2327 #define GET_SourcesOfDivergence_IMPL 2328 #define GET_Gfx9BufferFormat_IMPL 2329 #define GET_Gfx10BufferFormat_IMPL 2330 #define GET_Gfx11PlusBufferFormat_IMPL 2331 #include "AMDGPUGenSearchableTables.inc" 2332 2333 } // end anonymous namespace 2334 2335 bool isIntrinsicSourceOfDivergence(unsigned IntrID) { 2336 return lookupSourceOfDivergence(IntrID); 2337 } 2338 2339 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp, 2340 uint8_t NumComponents, 2341 uint8_t NumFormat, 2342 const MCSubtargetInfo &STI) { 2343 return isGFX11Plus(STI) 2344 ? getGfx11PlusBufferFormatInfo(BitsPerComp, NumComponents, 2345 NumFormat) 2346 : isGFX10(STI) ? getGfx10BufferFormatInfo(BitsPerComp, 2347 NumComponents, NumFormat) 2348 : getGfx9BufferFormatInfo(BitsPerComp, 2349 NumComponents, NumFormat); 2350 } 2351 2352 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format, 2353 const MCSubtargetInfo &STI) { 2354 return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(Format) 2355 : isGFX10(STI) ? getGfx10BufferFormatInfo(Format) 2356 : getGfx9BufferFormatInfo(Format); 2357 } 2358 2359 } // namespace AMDGPU 2360 2361 raw_ostream &operator<<(raw_ostream &OS, 2362 const AMDGPU::IsaInfo::TargetIDSetting S) { 2363 switch (S) { 2364 case (AMDGPU::IsaInfo::TargetIDSetting::Unsupported): 2365 OS << "Unsupported"; 2366 break; 2367 case (AMDGPU::IsaInfo::TargetIDSetting::Any): 2368 OS << "Any"; 2369 break; 2370 case (AMDGPU::IsaInfo::TargetIDSetting::Off): 2371 OS << "Off"; 2372 break; 2373 case (AMDGPU::IsaInfo::TargetIDSetting::On): 2374 OS << "On"; 2375 break; 2376 } 2377 return OS; 2378 } 2379 2380 } // namespace llvm 2381