1 //===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #include "AMDGPUBaseInfo.h" 10 #include "AMDGPU.h" 11 #include "AMDGPUAsmUtils.h" 12 #include "AMDKernelCodeT.h" 13 #include "GCNSubtarget.h" 14 #include "MCTargetDesc/AMDGPUMCTargetDesc.h" 15 #include "llvm/BinaryFormat/ELF.h" 16 #include "llvm/IR/Attributes.h" 17 #include "llvm/IR/Function.h" 18 #include "llvm/IR/GlobalValue.h" 19 #include "llvm/IR/IntrinsicsAMDGPU.h" 20 #include "llvm/IR/IntrinsicsR600.h" 21 #include "llvm/IR/LLVMContext.h" 22 #include "llvm/MC/MCSubtargetInfo.h" 23 #include "llvm/Support/AMDHSAKernelDescriptor.h" 24 #include "llvm/Support/CommandLine.h" 25 #include "llvm/Support/TargetParser.h" 26 27 #define GET_INSTRINFO_NAMED_OPS 28 #define GET_INSTRMAP_INFO 29 #include "AMDGPUGenInstrInfo.inc" 30 31 static llvm::cl::opt<unsigned> AmdhsaCodeObjectVersion( 32 "amdhsa-code-object-version", llvm::cl::Hidden, 33 llvm::cl::desc("AMDHSA Code Object Version"), llvm::cl::init(4), 34 llvm::cl::ZeroOrMore); 35 36 namespace { 37 38 /// \returns Bit mask for given bit \p Shift and bit \p Width. 39 unsigned getBitMask(unsigned Shift, unsigned Width) { 40 return ((1 << Width) - 1) << Shift; 41 } 42 43 /// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width. 44 /// 45 /// \returns Packed \p Dst. 46 unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) { 47 unsigned Mask = getBitMask(Shift, Width); 48 return ((Src << Shift) & Mask) | (Dst & ~Mask); 49 } 50 51 /// Unpacks bits from \p Src for given bit \p Shift and bit \p Width. 52 /// 53 /// \returns Unpacked bits. 54 unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) { 55 return (Src & getBitMask(Shift, Width)) >> Shift; 56 } 57 58 /// \returns Vmcnt bit shift (lower bits). 59 unsigned getVmcntBitShiftLo(unsigned VersionMajor) { 60 return VersionMajor >= 11 ? 10 : 0; 61 } 62 63 /// \returns Vmcnt bit width (lower bits). 64 unsigned getVmcntBitWidthLo(unsigned VersionMajor) { 65 return VersionMajor >= 11 ? 6 : 4; 66 } 67 68 /// \returns Expcnt bit shift. 69 unsigned getExpcntBitShift(unsigned VersionMajor) { 70 return VersionMajor >= 11 ? 0 : 4; 71 } 72 73 /// \returns Expcnt bit width. 74 unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; } 75 76 /// \returns Lgkmcnt bit shift. 77 unsigned getLgkmcntBitShift(unsigned VersionMajor) { 78 return VersionMajor >= 11 ? 4 : 8; 79 } 80 81 /// \returns Lgkmcnt bit width. 82 unsigned getLgkmcntBitWidth(unsigned VersionMajor) { 83 return VersionMajor >= 10 ? 6 : 4; 84 } 85 86 /// \returns Vmcnt bit shift (higher bits). 87 unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; } 88 89 /// \returns Vmcnt bit width (higher bits). 90 unsigned getVmcntBitWidthHi(unsigned VersionMajor) { 91 return (VersionMajor == 9 || VersionMajor == 10) ? 
2 : 0; 92 } 93 94 } // end namespace anonymous 95 96 namespace llvm { 97 98 namespace AMDGPU { 99 100 Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) { 101 if (STI && STI->getTargetTriple().getOS() != Triple::AMDHSA) 102 return None; 103 104 switch (AmdhsaCodeObjectVersion) { 105 case 2: 106 return ELF::ELFABIVERSION_AMDGPU_HSA_V2; 107 case 3: 108 return ELF::ELFABIVERSION_AMDGPU_HSA_V3; 109 case 4: 110 return ELF::ELFABIVERSION_AMDGPU_HSA_V4; 111 case 5: 112 return ELF::ELFABIVERSION_AMDGPU_HSA_V5; 113 default: 114 report_fatal_error(Twine("Unsupported AMDHSA Code Object Version ") + 115 Twine(AmdhsaCodeObjectVersion)); 116 } 117 } 118 119 bool isHsaAbiVersion2(const MCSubtargetInfo *STI) { 120 if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI)) 121 return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V2; 122 return false; 123 } 124 125 bool isHsaAbiVersion3(const MCSubtargetInfo *STI) { 126 if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI)) 127 return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V3; 128 return false; 129 } 130 131 bool isHsaAbiVersion4(const MCSubtargetInfo *STI) { 132 if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI)) 133 return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V4; 134 return false; 135 } 136 137 bool isHsaAbiVersion5(const MCSubtargetInfo *STI) { 138 if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI)) 139 return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V5; 140 return false; 141 } 142 143 bool isHsaAbiVersion3AndAbove(const MCSubtargetInfo *STI) { 144 return isHsaAbiVersion3(STI) || isHsaAbiVersion4(STI) || 145 isHsaAbiVersion5(STI); 146 } 147 148 unsigned getAmdhsaCodeObjectVersion() { 149 return AmdhsaCodeObjectVersion; 150 } 151 152 unsigned getMultigridSyncArgImplicitArgPosition() { 153 switch (AmdhsaCodeObjectVersion) { 154 case 2: 155 case 3: 156 case 4: 157 return 48; 158 case 5: 159 return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET; 160 default: 161 llvm_unreachable("Unexpected code object version"); 162 return 0; 163 } 164 } 165 166 167 // FIXME: All such magic numbers about the ABI should be in a 168 // central TD file. 169 unsigned getHostcallImplicitArgPosition() { 170 switch (AmdhsaCodeObjectVersion) { 171 case 2: 172 case 3: 173 case 4: 174 return 24; 175 case 5: 176 return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET; 177 default: 178 llvm_unreachable("Unexpected code object version"); 179 return 0; 180 } 181 } 182 183 #define GET_MIMGBaseOpcodesTable_IMPL 184 #define GET_MIMGDimInfoTable_IMPL 185 #define GET_MIMGInfoTable_IMPL 186 #define GET_MIMGLZMappingTable_IMPL 187 #define GET_MIMGMIPMappingTable_IMPL 188 #define GET_MIMGBiasMappingTable_IMPL 189 #define GET_MIMGOffsetMappingTable_IMPL 190 #define GET_MIMGG16MappingTable_IMPL 191 #define GET_MAIInstInfoTable_IMPL 192 #include "AMDGPUGenSearchableTables.inc" 193 194 int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding, 195 unsigned VDataDwords, unsigned VAddrDwords) { 196 const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding, 197 VDataDwords, VAddrDwords); 198 return Info ? Info->Opcode : -1; 199 } 200 201 const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) { 202 const MIMGInfo *Info = getMIMGInfo(Opc); 203 return Info ? 
getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr; 204 } 205 206 int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) { 207 const MIMGInfo *OrigInfo = getMIMGInfo(Opc); 208 const MIMGInfo *NewInfo = 209 getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding, 210 NewChannels, OrigInfo->VAddrDwords); 211 return NewInfo ? NewInfo->Opcode : -1; 212 } 213 214 unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, 215 const MIMGDimInfo *Dim, bool IsA16, 216 bool IsG16Supported) { 217 unsigned AddrWords = BaseOpcode->NumExtraArgs; 218 unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) + 219 (BaseOpcode->LodOrClampOrMip ? 1 : 0); 220 if (IsA16) 221 AddrWords += divideCeil(AddrComponents, 2); 222 else 223 AddrWords += AddrComponents; 224 225 // Note: For subtargets that support A16 but not G16, enabling A16 also 226 // enables 16 bit gradients. 227 // For subtargets that support A16 (operand) and G16 (done with a different 228 // instruction encoding), they are independent. 229 230 if (BaseOpcode->Gradients) { 231 if ((IsA16 && !IsG16Supported) || BaseOpcode->G16) 232 // There are two gradients per coordinate, we pack them separately. 233 // For the 3d case, 234 // we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv) 235 AddrWords += alignTo<2>(Dim->NumGradients / 2); 236 else 237 AddrWords += Dim->NumGradients; 238 } 239 return AddrWords; 240 } 241 242 struct MUBUFInfo { 243 uint16_t Opcode; 244 uint16_t BaseOpcode; 245 uint8_t elements; 246 bool has_vaddr; 247 bool has_srsrc; 248 bool has_soffset; 249 bool IsBufferInv; 250 }; 251 252 struct MTBUFInfo { 253 uint16_t Opcode; 254 uint16_t BaseOpcode; 255 uint8_t elements; 256 bool has_vaddr; 257 bool has_srsrc; 258 bool has_soffset; 259 }; 260 261 struct SMInfo { 262 uint16_t Opcode; 263 bool IsBuffer; 264 }; 265 266 struct VOPInfo { 267 uint16_t Opcode; 268 bool IsSingle; 269 }; 270 271 #define GET_MTBUFInfoTable_DECL 272 #define GET_MTBUFInfoTable_IMPL 273 #define GET_MUBUFInfoTable_DECL 274 #define GET_MUBUFInfoTable_IMPL 275 #define GET_SMInfoTable_DECL 276 #define GET_SMInfoTable_IMPL 277 #define GET_VOP1InfoTable_DECL 278 #define GET_VOP1InfoTable_IMPL 279 #define GET_VOP2InfoTable_DECL 280 #define GET_VOP2InfoTable_IMPL 281 #define GET_VOP3InfoTable_DECL 282 #define GET_VOP3InfoTable_IMPL 283 #include "AMDGPUGenSearchableTables.inc" 284 285 int getMTBUFBaseOpcode(unsigned Opc) { 286 const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc); 287 return Info ? Info->BaseOpcode : -1; 288 } 289 290 int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) { 291 const MTBUFInfo *Info = getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements); 292 return Info ? Info->Opcode : -1; 293 } 294 295 int getMTBUFElements(unsigned Opc) { 296 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc); 297 return Info ? Info->elements : 0; 298 } 299 300 bool getMTBUFHasVAddr(unsigned Opc) { 301 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc); 302 return Info ? Info->has_vaddr : false; 303 } 304 305 bool getMTBUFHasSrsrc(unsigned Opc) { 306 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc); 307 return Info ? Info->has_srsrc : false; 308 } 309 310 bool getMTBUFHasSoffset(unsigned Opc) { 311 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc); 312 return Info ? Info->has_soffset : false; 313 } 314 315 int getMUBUFBaseOpcode(unsigned Opc) { 316 const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc); 317 return Info ? 
Info->BaseOpcode : -1; 318 } 319 320 int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) { 321 const MUBUFInfo *Info = getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements); 322 return Info ? Info->Opcode : -1; 323 } 324 325 int getMUBUFElements(unsigned Opc) { 326 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc); 327 return Info ? Info->elements : 0; 328 } 329 330 bool getMUBUFHasVAddr(unsigned Opc) { 331 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc); 332 return Info ? Info->has_vaddr : false; 333 } 334 335 bool getMUBUFHasSrsrc(unsigned Opc) { 336 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc); 337 return Info ? Info->has_srsrc : false; 338 } 339 340 bool getMUBUFHasSoffset(unsigned Opc) { 341 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc); 342 return Info ? Info->has_soffset : false; 343 } 344 345 bool getMUBUFIsBufferInv(unsigned Opc) { 346 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc); 347 return Info ? Info->IsBufferInv : false; 348 } 349 350 bool getSMEMIsBuffer(unsigned Opc) { 351 const SMInfo *Info = getSMEMOpcodeHelper(Opc); 352 return Info ? Info->IsBuffer : false; 353 } 354 355 bool getVOP1IsSingle(unsigned Opc) { 356 const VOPInfo *Info = getVOP1OpcodeHelper(Opc); 357 return Info ? Info->IsSingle : false; 358 } 359 360 bool getVOP2IsSingle(unsigned Opc) { 361 const VOPInfo *Info = getVOP2OpcodeHelper(Opc); 362 return Info ? Info->IsSingle : false; 363 } 364 365 bool getVOP3IsSingle(unsigned Opc) { 366 const VOPInfo *Info = getVOP3OpcodeHelper(Opc); 367 return Info ? Info->IsSingle : false; 368 } 369 370 bool getMAIIsDGEMM(unsigned Opc) { 371 const MAIInstInfo *Info = getMAIInstInfoHelper(Opc); 372 return Info ? Info->is_dgemm : false; 373 } 374 375 bool getMAIIsGFX940XDL(unsigned Opc) { 376 const MAIInstInfo *Info = getMAIInstInfoHelper(Opc); 377 return Info ? Info->is_gfx940_xdl : false; 378 } 379 380 // Wrapper for Tablegen'd function. enum Subtarget is not defined in any 381 // header files, so we need to wrap it in a function that takes unsigned 382 // instead. 383 int getMCOpcode(uint16_t Opcode, unsigned Gen) { 384 return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen)); 385 } 386 387 namespace IsaInfo { 388 389 AMDGPUTargetID::AMDGPUTargetID(const MCSubtargetInfo &STI) 390 : STI(STI), XnackSetting(TargetIDSetting::Any), 391 SramEccSetting(TargetIDSetting::Any) { 392 if (!STI.getFeatureBits().test(FeatureSupportsXNACK)) 393 XnackSetting = TargetIDSetting::Unsupported; 394 if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC)) 395 SramEccSetting = TargetIDSetting::Unsupported; 396 } 397 398 void AMDGPUTargetID::setTargetIDFromFeaturesString(StringRef FS) { 399 // Check if xnack or sramecc is explicitly enabled or disabled. In the 400 // absence of the target features we assume we must generate code that can run 401 // in any environment. 402 SubtargetFeatures Features(FS); 403 Optional<bool> XnackRequested; 404 Optional<bool> SramEccRequested; 405 406 for (const std::string &Feature : Features.getFeatures()) { 407 if (Feature == "+xnack") 408 XnackRequested = true; 409 else if (Feature == "-xnack") 410 XnackRequested = false; 411 else if (Feature == "+sramecc") 412 SramEccRequested = true; 413 else if (Feature == "-sramecc") 414 SramEccRequested = false; 415 } 416 417 bool XnackSupported = isXnackSupported(); 418 bool SramEccSupported = isSramEccSupported(); 419 420 if (XnackRequested) { 421 if (XnackSupported) { 422 XnackSetting = 423 *XnackRequested ? 
TargetIDSetting::On : TargetIDSetting::Off; 424 } else { 425 // If a specific xnack setting was requested and this GPU does not support 426 // xnack emit a warning. Setting will remain set to "Unsupported". 427 if (*XnackRequested) { 428 errs() << "warning: xnack 'On' was requested for a processor that does " 429 "not support it!\n"; 430 } else { 431 errs() << "warning: xnack 'Off' was requested for a processor that " 432 "does not support it!\n"; 433 } 434 } 435 } 436 437 if (SramEccRequested) { 438 if (SramEccSupported) { 439 SramEccSetting = 440 *SramEccRequested ? TargetIDSetting::On : TargetIDSetting::Off; 441 } else { 442 // If a specific sramecc setting was requested and this GPU does not 443 // support sramecc emit a warning. Setting will remain set to 444 // "Unsupported". 445 if (*SramEccRequested) { 446 errs() << "warning: sramecc 'On' was requested for a processor that " 447 "does not support it!\n"; 448 } else { 449 errs() << "warning: sramecc 'Off' was requested for a processor that " 450 "does not support it!\n"; 451 } 452 } 453 } 454 } 455 456 static TargetIDSetting 457 getTargetIDSettingFromFeatureString(StringRef FeatureString) { 458 if (FeatureString.endswith("-")) 459 return TargetIDSetting::Off; 460 if (FeatureString.endswith("+")) 461 return TargetIDSetting::On; 462 463 llvm_unreachable("Malformed feature string"); 464 } 465 466 void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) { 467 SmallVector<StringRef, 3> TargetIDSplit; 468 TargetID.split(TargetIDSplit, ':'); 469 470 for (const auto &FeatureString : TargetIDSplit) { 471 if (FeatureString.startswith("xnack")) 472 XnackSetting = getTargetIDSettingFromFeatureString(FeatureString); 473 if (FeatureString.startswith("sramecc")) 474 SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString); 475 } 476 } 477 478 std::string AMDGPUTargetID::toString() const { 479 std::string StringRep; 480 raw_string_ostream StreamRep(StringRep); 481 482 auto TargetTriple = STI.getTargetTriple(); 483 auto Version = getIsaVersion(STI.getCPU()); 484 485 StreamRep << TargetTriple.getArchName() << '-' 486 << TargetTriple.getVendorName() << '-' 487 << TargetTriple.getOSName() << '-' 488 << TargetTriple.getEnvironmentName() << '-'; 489 490 std::string Processor; 491 // TODO: Following else statement is present here because we used various 492 // alias names for GPUs up until GFX9 (e.g. 'fiji' is same as 'gfx803'). 493 // Remove once all aliases are removed from GCNProcessors.td. 494 if (Version.Major >= 9) 495 Processor = STI.getCPU().str(); 496 else 497 Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) + 498 Twine(Version.Stepping)) 499 .str(); 500 501 std::string Features; 502 if (Optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) { 503 switch (*HsaAbiVersion) { 504 case ELF::ELFABIVERSION_AMDGPU_HSA_V2: 505 // Code object V2 only supported specific processors and had fixed 506 // settings for the XNACK. 
507 if (Processor == "gfx600") { 508 } else if (Processor == "gfx601") { 509 } else if (Processor == "gfx602") { 510 } else if (Processor == "gfx700") { 511 } else if (Processor == "gfx701") { 512 } else if (Processor == "gfx702") { 513 } else if (Processor == "gfx703") { 514 } else if (Processor == "gfx704") { 515 } else if (Processor == "gfx705") { 516 } else if (Processor == "gfx801") { 517 if (!isXnackOnOrAny()) 518 report_fatal_error( 519 "AMD GPU code object V2 does not support processor " + 520 Twine(Processor) + " without XNACK"); 521 } else if (Processor == "gfx802") { 522 } else if (Processor == "gfx803") { 523 } else if (Processor == "gfx805") { 524 } else if (Processor == "gfx810") { 525 if (!isXnackOnOrAny()) 526 report_fatal_error( 527 "AMD GPU code object V2 does not support processor " + 528 Twine(Processor) + " without XNACK"); 529 } else if (Processor == "gfx900") { 530 if (isXnackOnOrAny()) 531 Processor = "gfx901"; 532 } else if (Processor == "gfx902") { 533 if (isXnackOnOrAny()) 534 Processor = "gfx903"; 535 } else if (Processor == "gfx904") { 536 if (isXnackOnOrAny()) 537 Processor = "gfx905"; 538 } else if (Processor == "gfx906") { 539 if (isXnackOnOrAny()) 540 Processor = "gfx907"; 541 } else if (Processor == "gfx90c") { 542 if (isXnackOnOrAny()) 543 report_fatal_error( 544 "AMD GPU code object V2 does not support processor " + 545 Twine(Processor) + " with XNACK being ON or ANY"); 546 } else { 547 report_fatal_error( 548 "AMD GPU code object V2 does not support processor " + 549 Twine(Processor)); 550 } 551 break; 552 case ELF::ELFABIVERSION_AMDGPU_HSA_V3: 553 // xnack. 554 if (isXnackOnOrAny()) 555 Features += "+xnack"; 556 // In code object v2 and v3, "sramecc" feature was spelled with a 557 // hyphen ("sram-ecc"). 558 if (isSramEccOnOrAny()) 559 Features += "+sram-ecc"; 560 break; 561 case ELF::ELFABIVERSION_AMDGPU_HSA_V4: 562 case ELF::ELFABIVERSION_AMDGPU_HSA_V5: 563 // sramecc. 564 if (getSramEccSetting() == TargetIDSetting::Off) 565 Features += ":sramecc-"; 566 else if (getSramEccSetting() == TargetIDSetting::On) 567 Features += ":sramecc+"; 568 // xnack. 569 if (getXnackSetting() == TargetIDSetting::Off) 570 Features += ":xnack-"; 571 else if (getXnackSetting() == TargetIDSetting::On) 572 Features += ":xnack+"; 573 break; 574 default: 575 break; 576 } 577 } 578 579 StreamRep << Processor << Features; 580 581 StreamRep.flush(); 582 return StringRep; 583 } 584 585 unsigned getWavefrontSize(const MCSubtargetInfo *STI) { 586 if (STI->getFeatureBits().test(FeatureWavefrontSize16)) 587 return 16; 588 if (STI->getFeatureBits().test(FeatureWavefrontSize32)) 589 return 32; 590 591 return 64; 592 } 593 594 unsigned getLocalMemorySize(const MCSubtargetInfo *STI) { 595 if (STI->getFeatureBits().test(FeatureLocalMemorySize32768)) 596 return 32768; 597 if (STI->getFeatureBits().test(FeatureLocalMemorySize65536)) 598 return 65536; 599 600 return 0; 601 } 602 603 unsigned getEUsPerCU(const MCSubtargetInfo *STI) { 604 // "Per CU" really means "per whatever functional block the waves of a 605 // workgroup must share". For gfx10 in CU mode this is the CU, which contains 606 // two SIMDs. 607 if (isGFX10Plus(*STI) && STI->getFeatureBits().test(FeatureCuMode)) 608 return 2; 609 // Pre-gfx10 a CU contains four SIMDs. For gfx10 in WGP mode the WGP contains 610 // two CUs, so a total of four SIMDs. 
611 return 4; 612 } 613 614 unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI, 615 unsigned FlatWorkGroupSize) { 616 assert(FlatWorkGroupSize != 0); 617 if (STI->getTargetTriple().getArch() != Triple::amdgcn) 618 return 8; 619 unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize); 620 if (N == 1) 621 return 40; 622 N = 40 / N; 623 return std::min(N, 16u); 624 } 625 626 unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) { 627 return 1; 628 } 629 630 unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) { 631 // FIXME: Need to take scratch memory into account. 632 if (isGFX90A(*STI)) 633 return 8; 634 if (!isGFX10Plus(*STI)) 635 return 10; 636 return hasGFX10_3Insts(*STI) ? 16 : 20; 637 } 638 639 unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI, 640 unsigned FlatWorkGroupSize) { 641 return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize), 642 getEUsPerCU(STI)); 643 } 644 645 unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) { 646 return 1; 647 } 648 649 unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) { 650 // Some subtargets allow encoding 2048, but this isn't tested or supported. 651 return 1024; 652 } 653 654 unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI, 655 unsigned FlatWorkGroupSize) { 656 return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI)); 657 } 658 659 unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) { 660 IsaVersion Version = getIsaVersion(STI->getCPU()); 661 if (Version.Major >= 10) 662 return getAddressableNumSGPRs(STI); 663 if (Version.Major >= 8) 664 return 16; 665 return 8; 666 } 667 668 unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) { 669 return 8; 670 } 671 672 unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) { 673 IsaVersion Version = getIsaVersion(STI->getCPU()); 674 if (Version.Major >= 8) 675 return 800; 676 return 512; 677 } 678 679 unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) { 680 if (STI->getFeatureBits().test(FeatureSGPRInitBug)) 681 return FIXED_NUM_SGPRS_FOR_INIT_BUG; 682 683 IsaVersion Version = getIsaVersion(STI->getCPU()); 684 if (Version.Major >= 10) 685 return 106; 686 if (Version.Major >= 8) 687 return 102; 688 return 104; 689 } 690 691 unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) { 692 assert(WavesPerEU != 0); 693 694 IsaVersion Version = getIsaVersion(STI->getCPU()); 695 if (Version.Major >= 10) 696 return 0; 697 698 if (WavesPerEU >= getMaxWavesPerEU(STI)) 699 return 0; 700 701 unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1); 702 if (STI->getFeatureBits().test(FeatureTrapHandler)) 703 MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS); 704 MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1; 705 return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI)); 706 } 707 708 unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, 709 bool Addressable) { 710 assert(WavesPerEU != 0); 711 712 unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI); 713 IsaVersion Version = getIsaVersion(STI->getCPU()); 714 if (Version.Major >= 10) 715 return Addressable ? 
AddressableNumSGPRs : 108; 716 if (Version.Major >= 8 && !Addressable) 717 AddressableNumSGPRs = 112; 718 unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU; 719 if (STI->getFeatureBits().test(FeatureTrapHandler)) 720 MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS); 721 MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI)); 722 return std::min(MaxNumSGPRs, AddressableNumSGPRs); 723 } 724 725 unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed, 726 bool FlatScrUsed, bool XNACKUsed) { 727 unsigned ExtraSGPRs = 0; 728 if (VCCUsed) 729 ExtraSGPRs = 2; 730 731 IsaVersion Version = getIsaVersion(STI->getCPU()); 732 if (Version.Major >= 10) 733 return ExtraSGPRs; 734 735 if (Version.Major < 8) { 736 if (FlatScrUsed) 737 ExtraSGPRs = 4; 738 } else { 739 if (XNACKUsed) 740 ExtraSGPRs = 4; 741 742 if (FlatScrUsed || 743 STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch)) 744 ExtraSGPRs = 6; 745 } 746 747 return ExtraSGPRs; 748 } 749 750 unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed, 751 bool FlatScrUsed) { 752 return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed, 753 STI->getFeatureBits().test(AMDGPU::FeatureXNACK)); 754 } 755 756 unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) { 757 NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(STI)); 758 // SGPRBlocks is actual number of SGPR blocks minus 1. 759 return NumSGPRs / getSGPREncodingGranule(STI) - 1; 760 } 761 762 unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI, 763 Optional<bool> EnableWavefrontSize32) { 764 if (STI->getFeatureBits().test(FeatureGFX90AInsts)) 765 return 8; 766 767 bool IsWave32 = EnableWavefrontSize32 ? 768 *EnableWavefrontSize32 : 769 STI->getFeatureBits().test(FeatureWavefrontSize32); 770 771 if (hasGFX10_3Insts(*STI)) 772 return IsWave32 ? 16 : 8; 773 774 return IsWave32 ? 8 : 4; 775 } 776 777 unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI, 778 Optional<bool> EnableWavefrontSize32) { 779 if (STI->getFeatureBits().test(FeatureGFX90AInsts)) 780 return 8; 781 782 bool IsWave32 = EnableWavefrontSize32 ? 783 *EnableWavefrontSize32 : 784 STI->getFeatureBits().test(FeatureWavefrontSize32); 785 786 return IsWave32 ? 8 : 4; 787 } 788 789 unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) { 790 if (STI->getFeatureBits().test(FeatureGFX90AInsts)) 791 return 512; 792 if (!isGFX10Plus(*STI)) 793 return 256; 794 return STI->getFeatureBits().test(FeatureWavefrontSize32) ? 
1024 : 512; 795 } 796 797 unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI) { 798 if (STI->getFeatureBits().test(FeatureGFX90AInsts)) 799 return 512; 800 return 256; 801 } 802 803 unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) { 804 assert(WavesPerEU != 0); 805 806 if (WavesPerEU >= getMaxWavesPerEU(STI)) 807 return 0; 808 unsigned MinNumVGPRs = 809 alignDown(getTotalNumVGPRs(STI) / (WavesPerEU + 1), 810 getVGPRAllocGranule(STI)) + 1; 811 return std::min(MinNumVGPRs, getAddressableNumVGPRs(STI)); 812 } 813 814 unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) { 815 assert(WavesPerEU != 0); 816 817 unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU, 818 getVGPRAllocGranule(STI)); 819 unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI); 820 return std::min(MaxNumVGPRs, AddressableNumVGPRs); 821 } 822 823 unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, 824 Optional<bool> EnableWavefrontSize32) { 825 NumVGPRs = alignTo(std::max(1u, NumVGPRs), 826 getVGPREncodingGranule(STI, EnableWavefrontSize32)); 827 // VGPRBlocks is actual number of VGPR blocks minus 1. 828 return NumVGPRs / getVGPREncodingGranule(STI, EnableWavefrontSize32) - 1; 829 } 830 831 } // end namespace IsaInfo 832 833 void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header, 834 const MCSubtargetInfo *STI) { 835 IsaVersion Version = getIsaVersion(STI->getCPU()); 836 837 memset(&Header, 0, sizeof(Header)); 838 839 Header.amd_kernel_code_version_major = 1; 840 Header.amd_kernel_code_version_minor = 2; 841 Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU 842 Header.amd_machine_version_major = Version.Major; 843 Header.amd_machine_version_minor = Version.Minor; 844 Header.amd_machine_version_stepping = Version.Stepping; 845 Header.kernel_code_entry_byte_offset = sizeof(Header); 846 Header.wavefront_size = 6; 847 848 // If the code object does not support indirect functions, then the value must 849 // be 0xffffffff. 850 Header.call_convention = -1; 851 852 // These alignment values are specified in powers of two, so alignment = 853 // 2^n. The minimum alignment is 2^4 = 16. 854 Header.kernarg_segment_alignment = 4; 855 Header.group_segment_alignment = 4; 856 Header.private_segment_alignment = 4; 857 858 if (Version.Major >= 10) { 859 if (STI->getFeatureBits().test(FeatureWavefrontSize32)) { 860 Header.wavefront_size = 5; 861 Header.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32; 862 } 863 Header.compute_pgm_resource_registers |= 864 S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) | 865 S_00B848_MEM_ORDERED(1); 866 } 867 } 868 869 amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor( 870 const MCSubtargetInfo *STI) { 871 IsaVersion Version = getIsaVersion(STI->getCPU()); 872 873 amdhsa::kernel_descriptor_t KD; 874 memset(&KD, 0, sizeof(KD)); 875 876 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1, 877 amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64, 878 amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE); 879 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1, 880 amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1); 881 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1, 882 amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1); 883 AMDHSA_BITS_SET(KD.compute_pgm_rsrc2, 884 amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1); 885 if (Version.Major >= 10) { 886 AMDHSA_BITS_SET(KD.kernel_code_properties, 887 amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32, 888 STI->getFeatureBits().test(FeatureWavefrontSize32) ? 
1 : 0); 889 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1, 890 amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE, 891 STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1); 892 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1, 893 amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED, 1); 894 } 895 if (AMDGPU::isGFX90A(*STI)) { 896 AMDHSA_BITS_SET(KD.compute_pgm_rsrc3, 897 amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT, 898 STI->getFeatureBits().test(FeatureTgSplit) ? 1 : 0); 899 } 900 return KD; 901 } 902 903 bool isGroupSegment(const GlobalValue *GV) { 904 return GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS; 905 } 906 907 bool isGlobalSegment(const GlobalValue *GV) { 908 return GV->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS; 909 } 910 911 bool isReadOnlySegment(const GlobalValue *GV) { 912 unsigned AS = GV->getAddressSpace(); 913 return AS == AMDGPUAS::CONSTANT_ADDRESS || 914 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT; 915 } 916 917 bool shouldEmitConstantsToTextSection(const Triple &TT) { 918 return TT.getArch() == Triple::r600; 919 } 920 921 int getIntegerAttribute(const Function &F, StringRef Name, int Default) { 922 Attribute A = F.getFnAttribute(Name); 923 int Result = Default; 924 925 if (A.isStringAttribute()) { 926 StringRef Str = A.getValueAsString(); 927 if (Str.getAsInteger(0, Result)) { 928 LLVMContext &Ctx = F.getContext(); 929 Ctx.emitError("can't parse integer attribute " + Name); 930 } 931 } 932 933 return Result; 934 } 935 936 std::pair<int, int> getIntegerPairAttribute(const Function &F, 937 StringRef Name, 938 std::pair<int, int> Default, 939 bool OnlyFirstRequired) { 940 Attribute A = F.getFnAttribute(Name); 941 if (!A.isStringAttribute()) 942 return Default; 943 944 LLVMContext &Ctx = F.getContext(); 945 std::pair<int, int> Ints = Default; 946 std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(','); 947 if (Strs.first.trim().getAsInteger(0, Ints.first)) { 948 Ctx.emitError("can't parse first integer attribute " + Name); 949 return Default; 950 } 951 if (Strs.second.trim().getAsInteger(0, Ints.second)) { 952 if (!OnlyFirstRequired || !Strs.second.trim().empty()) { 953 Ctx.emitError("can't parse second integer attribute " + Name); 954 return Default; 955 } 956 } 957 958 return Ints; 959 } 960 961 unsigned getVmcntBitMask(const IsaVersion &Version) { 962 return (1 << (getVmcntBitWidthLo(Version.Major) + 963 getVmcntBitWidthHi(Version.Major))) - 964 1; 965 } 966 967 unsigned getExpcntBitMask(const IsaVersion &Version) { 968 return (1 << getExpcntBitWidth(Version.Major)) - 1; 969 } 970 971 unsigned getLgkmcntBitMask(const IsaVersion &Version) { 972 return (1 << getLgkmcntBitWidth(Version.Major)) - 1; 973 } 974 975 unsigned getWaitcntBitMask(const IsaVersion &Version) { 976 unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major), 977 getVmcntBitWidthLo(Version.Major)); 978 unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major), 979 getExpcntBitWidth(Version.Major)); 980 unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major), 981 getLgkmcntBitWidth(Version.Major)); 982 unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major), 983 getVmcntBitWidthHi(Version.Major)); 984 return VmcntLo | Expcnt | Lgkmcnt | VmcntHi; 985 } 986 987 unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) { 988 unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major), 989 getVmcntBitWidthLo(Version.Major)); 990 unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major), 991 getVmcntBitWidthHi(Version.Major)); 992 return VmcntLo | VmcntHi << 
getVmcntBitWidthLo(Version.Major); 993 } 994 995 unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) { 996 return unpackBits(Waitcnt, getExpcntBitShift(Version.Major), 997 getExpcntBitWidth(Version.Major)); 998 } 999 1000 unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) { 1001 return unpackBits(Waitcnt, getLgkmcntBitShift(Version.Major), 1002 getLgkmcntBitWidth(Version.Major)); 1003 } 1004 1005 void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, 1006 unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) { 1007 Vmcnt = decodeVmcnt(Version, Waitcnt); 1008 Expcnt = decodeExpcnt(Version, Waitcnt); 1009 Lgkmcnt = decodeLgkmcnt(Version, Waitcnt); 1010 } 1011 1012 Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) { 1013 Waitcnt Decoded; 1014 Decoded.VmCnt = decodeVmcnt(Version, Encoded); 1015 Decoded.ExpCnt = decodeExpcnt(Version, Encoded); 1016 Decoded.LgkmCnt = decodeLgkmcnt(Version, Encoded); 1017 return Decoded; 1018 } 1019 1020 unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt, 1021 unsigned Vmcnt) { 1022 Waitcnt = packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(Version.Major), 1023 getVmcntBitWidthLo(Version.Major)); 1024 return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt, 1025 getVmcntBitShiftHi(Version.Major), 1026 getVmcntBitWidthHi(Version.Major)); 1027 } 1028 1029 unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt, 1030 unsigned Expcnt) { 1031 return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major), 1032 getExpcntBitWidth(Version.Major)); 1033 } 1034 1035 unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt, 1036 unsigned Lgkmcnt) { 1037 return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major), 1038 getLgkmcntBitWidth(Version.Major)); 1039 } 1040 1041 unsigned encodeWaitcnt(const IsaVersion &Version, 1042 unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) { 1043 unsigned Waitcnt = getWaitcntBitMask(Version); 1044 Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt); 1045 Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt); 1046 Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt); 1047 return Waitcnt; 1048 } 1049 1050 unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) { 1051 return encodeWaitcnt(Version, Decoded.VmCnt, Decoded.ExpCnt, Decoded.LgkmCnt); 1052 } 1053 1054 //===----------------------------------------------------------------------===// 1055 // Custom Operands. 1056 // 1057 // A table of custom operands shall describe "primary" operand names 1058 // first followed by aliases if any. It is not required but recommended 1059 // to arrange operands so that operand encoding match operand position 1060 // in the table. This will make disassembly a bit more efficient. 1061 // Unused slots in the table shall have an empty name. 
1062 // 1063 //===----------------------------------------------------------------------===// 1064 1065 template <class T> 1066 static bool isValidOpr(int Idx, const CustomOperand<T> OpInfo[], int OpInfoSize, 1067 T Context) { 1068 return 0 <= Idx && Idx < OpInfoSize && !OpInfo[Idx].Name.empty() && 1069 (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context)); 1070 } 1071 1072 template <class T> 1073 static int getOprIdx(std::function<bool(const CustomOperand<T> &)> Test, 1074 const CustomOperand<T> OpInfo[], int OpInfoSize, 1075 T Context) { 1076 int InvalidIdx = OPR_ID_UNKNOWN; 1077 for (int Idx = 0; Idx < OpInfoSize; ++Idx) { 1078 if (Test(OpInfo[Idx])) { 1079 if (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context)) 1080 return Idx; 1081 InvalidIdx = OPR_ID_UNSUPPORTED; 1082 } 1083 } 1084 return InvalidIdx; 1085 } 1086 1087 template <class T> 1088 static int getOprIdx(const StringRef Name, const CustomOperand<T> OpInfo[], 1089 int OpInfoSize, T Context) { 1090 auto Test = [=](const CustomOperand<T> &Op) { return Op.Name == Name; }; 1091 return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context); 1092 } 1093 1094 template <class T> 1095 static int getOprIdx(int Id, const CustomOperand<T> OpInfo[], int OpInfoSize, 1096 T Context, bool QuickCheck = true) { 1097 auto Test = [=](const CustomOperand<T> &Op) { 1098 return Op.Encoding == Id && !Op.Name.empty(); 1099 }; 1100 // This is an optimization that should work in most cases. 1101 // As a side effect, it may cause selection of an alias 1102 // instead of a primary operand name in case of sparse tables. 1103 if (QuickCheck && isValidOpr<T>(Id, OpInfo, OpInfoSize, Context) && 1104 OpInfo[Id].Encoding == Id) { 1105 return Id; 1106 } 1107 return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context); 1108 } 1109 1110 //===----------------------------------------------------------------------===// 1111 // Custom Operand Values 1112 //===----------------------------------------------------------------------===// 1113 1114 static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr, 1115 int Size, 1116 const MCSubtargetInfo &STI) { 1117 unsigned Enc = 0; 1118 for (int Idx = 0; Idx < Size; ++Idx) { 1119 const auto &Op = Opr[Idx]; 1120 if (Op.isSupported(STI)) 1121 Enc |= Op.encode(Op.Default); 1122 } 1123 return Enc; 1124 } 1125 1126 static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr, 1127 int Size, unsigned Code, 1128 bool &HasNonDefaultVal, 1129 const MCSubtargetInfo &STI) { 1130 unsigned UsedOprMask = 0; 1131 HasNonDefaultVal = false; 1132 for (int Idx = 0; Idx < Size; ++Idx) { 1133 const auto &Op = Opr[Idx]; 1134 if (!Op.isSupported(STI)) 1135 continue; 1136 UsedOprMask |= Op.getMask(); 1137 unsigned Val = Op.decode(Code); 1138 if (!Op.isValid(Val)) 1139 return false; 1140 HasNonDefaultVal |= (Val != Op.Default); 1141 } 1142 return (Code & ~UsedOprMask) == 0; 1143 } 1144 1145 static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size, 1146 unsigned Code, int &Idx, StringRef &Name, 1147 unsigned &Val, bool &IsDefault, 1148 const MCSubtargetInfo &STI) { 1149 while (Idx < Size) { 1150 const auto &Op = Opr[Idx++]; 1151 if (Op.isSupported(STI)) { 1152 Name = Op.Name; 1153 Val = Op.decode(Code); 1154 IsDefault = (Val == Op.Default); 1155 return true; 1156 } 1157 } 1158 1159 return false; 1160 } 1161 1162 static int encodeCustomOperandVal(const CustomOperandVal &Op, 1163 int64_t InputVal) { 1164 if (InputVal < 0 || InputVal > Op.Max) 1165 return OPR_VAL_INVALID; 1166 return Op.encode(InputVal); 1167 } 1168 1169 static int 
encodeCustomOperand(const CustomOperandVal *Opr, int Size, 1170 const StringRef Name, int64_t InputVal, 1171 unsigned &UsedOprMask, 1172 const MCSubtargetInfo &STI) { 1173 int InvalidId = OPR_ID_UNKNOWN; 1174 for (int Idx = 0; Idx < Size; ++Idx) { 1175 const auto &Op = Opr[Idx]; 1176 if (Op.Name == Name) { 1177 if (!Op.isSupported(STI)) { 1178 InvalidId = OPR_ID_UNSUPPORTED; 1179 continue; 1180 } 1181 auto OprMask = Op.getMask(); 1182 if (OprMask & UsedOprMask) 1183 return OPR_ID_DUPLICATE; 1184 UsedOprMask |= OprMask; 1185 return encodeCustomOperandVal(Op, InputVal); 1186 } 1187 } 1188 return InvalidId; 1189 } 1190 1191 //===----------------------------------------------------------------------===// 1192 // DepCtr 1193 //===----------------------------------------------------------------------===// 1194 1195 namespace DepCtr { 1196 1197 int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI) { 1198 static int Default = -1; 1199 if (Default == -1) 1200 Default = getDefaultCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, STI); 1201 return Default; 1202 } 1203 1204 bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal, 1205 const MCSubtargetInfo &STI) { 1206 return isSymbolicCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, Code, 1207 HasNonDefaultVal, STI); 1208 } 1209 1210 bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val, 1211 bool &IsDefault, const MCSubtargetInfo &STI) { 1212 return decodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Code, Id, Name, Val, 1213 IsDefault, STI); 1214 } 1215 1216 int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask, 1217 const MCSubtargetInfo &STI) { 1218 return encodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Name, Val, UsedOprMask, 1219 STI); 1220 } 1221 1222 } // namespace DepCtr 1223 1224 //===----------------------------------------------------------------------===// 1225 // hwreg 1226 //===----------------------------------------------------------------------===// 1227 1228 namespace Hwreg { 1229 1230 int64_t getHwregId(const StringRef Name, const MCSubtargetInfo &STI) { 1231 int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Opr, OPR_SIZE, STI); 1232 return (Idx < 0) ? Idx : Opr[Idx].Encoding; 1233 } 1234 1235 bool isValidHwreg(int64_t Id) { 1236 return 0 <= Id && isUInt<ID_WIDTH_>(Id); 1237 } 1238 1239 bool isValidHwregOffset(int64_t Offset) { 1240 return 0 <= Offset && isUInt<OFFSET_WIDTH_>(Offset); 1241 } 1242 1243 bool isValidHwregWidth(int64_t Width) { 1244 return 0 <= (Width - 1) && isUInt<WIDTH_M1_WIDTH_>(Width - 1); 1245 } 1246 1247 uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width) { 1248 return (Id << ID_SHIFT_) | 1249 (Offset << OFFSET_SHIFT_) | 1250 ((Width - 1) << WIDTH_M1_SHIFT_); 1251 } 1252 1253 StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) { 1254 int Idx = getOprIdx<const MCSubtargetInfo &>(Id, Opr, OPR_SIZE, STI); 1255 return (Idx < 0) ? 
"" : Opr[Idx].Name; 1256 } 1257 1258 void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width) { 1259 Id = (Val & ID_MASK_) >> ID_SHIFT_; 1260 Offset = (Val & OFFSET_MASK_) >> OFFSET_SHIFT_; 1261 Width = ((Val & WIDTH_M1_MASK_) >> WIDTH_M1_SHIFT_) + 1; 1262 } 1263 1264 } // namespace Hwreg 1265 1266 //===----------------------------------------------------------------------===// 1267 // exp tgt 1268 //===----------------------------------------------------------------------===// 1269 1270 namespace Exp { 1271 1272 struct ExpTgt { 1273 StringLiteral Name; 1274 unsigned Tgt; 1275 unsigned MaxIndex; 1276 }; 1277 1278 static constexpr ExpTgt ExpTgtInfo[] = { 1279 {{"null"}, ET_NULL, ET_NULL_MAX_IDX}, 1280 {{"mrtz"}, ET_MRTZ, ET_MRTZ_MAX_IDX}, 1281 {{"prim"}, ET_PRIM, ET_PRIM_MAX_IDX}, 1282 {{"mrt"}, ET_MRT0, ET_MRT_MAX_IDX}, 1283 {{"pos"}, ET_POS0, ET_POS_MAX_IDX}, 1284 {{"dual_src_blend"}, ET_DUAL_SRC_BLEND0, ET_DUAL_SRC_BLEND_MAX_IDX}, 1285 {{"param"}, ET_PARAM0, ET_PARAM_MAX_IDX}, 1286 }; 1287 1288 bool getTgtName(unsigned Id, StringRef &Name, int &Index) { 1289 for (const ExpTgt &Val : ExpTgtInfo) { 1290 if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) { 1291 Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt); 1292 Name = Val.Name; 1293 return true; 1294 } 1295 } 1296 return false; 1297 } 1298 1299 unsigned getTgtId(const StringRef Name) { 1300 1301 for (const ExpTgt &Val : ExpTgtInfo) { 1302 if (Val.MaxIndex == 0 && Name == Val.Name) 1303 return Val.Tgt; 1304 1305 if (Val.MaxIndex > 0 && Name.startswith(Val.Name)) { 1306 StringRef Suffix = Name.drop_front(Val.Name.size()); 1307 1308 unsigned Id; 1309 if (Suffix.getAsInteger(10, Id) || Id > Val.MaxIndex) 1310 return ET_INVALID; 1311 1312 // Disable leading zeroes 1313 if (Suffix.size() > 1 && Suffix[0] == '0') 1314 return ET_INVALID; 1315 1316 return Val.Tgt + Id; 1317 } 1318 } 1319 return ET_INVALID; 1320 } 1321 1322 bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI) { 1323 switch (Id) { 1324 case ET_NULL: 1325 return !isGFX11Plus(STI); 1326 case ET_POS4: 1327 case ET_PRIM: 1328 return isGFX10Plus(STI); 1329 case ET_DUAL_SRC_BLEND0: 1330 case ET_DUAL_SRC_BLEND1: 1331 return isGFX11Plus(STI); 1332 default: 1333 if (Id >= ET_PARAM0 && Id <= ET_PARAM31) 1334 return !isGFX11Plus(STI); 1335 return true; 1336 } 1337 } 1338 1339 } // namespace Exp 1340 1341 //===----------------------------------------------------------------------===// 1342 // MTBUF Format 1343 //===----------------------------------------------------------------------===// 1344 1345 namespace MTBUFFormat { 1346 1347 int64_t getDfmt(const StringRef Name) { 1348 for (int Id = DFMT_MIN; Id <= DFMT_MAX; ++Id) { 1349 if (Name == DfmtSymbolic[Id]) 1350 return Id; 1351 } 1352 return DFMT_UNDEF; 1353 } 1354 1355 StringRef getDfmtName(unsigned Id) { 1356 assert(Id <= DFMT_MAX); 1357 return DfmtSymbolic[Id]; 1358 } 1359 1360 static StringLiteral const *getNfmtLookupTable(const MCSubtargetInfo &STI) { 1361 if (isSI(STI) || isCI(STI)) 1362 return NfmtSymbolicSICI; 1363 if (isVI(STI) || isGFX9(STI)) 1364 return NfmtSymbolicVI; 1365 return NfmtSymbolicGFX10; 1366 } 1367 1368 int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI) { 1369 auto lookupTable = getNfmtLookupTable(STI); 1370 for (int Id = NFMT_MIN; Id <= NFMT_MAX; ++Id) { 1371 if (Name == lookupTable[Id]) 1372 return Id; 1373 } 1374 return NFMT_UNDEF; 1375 } 1376 1377 StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI) { 1378 assert(Id <= NFMT_MAX); 1379 return 
getNfmtLookupTable(STI)[Id]; 1380 } 1381 1382 bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI) { 1383 unsigned Dfmt; 1384 unsigned Nfmt; 1385 decodeDfmtNfmt(Id, Dfmt, Nfmt); 1386 return isValidNfmt(Nfmt, STI); 1387 } 1388 1389 bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI) { 1390 return !getNfmtName(Id, STI).empty(); 1391 } 1392 1393 int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt) { 1394 return (Dfmt << DFMT_SHIFT) | (Nfmt << NFMT_SHIFT); 1395 } 1396 1397 void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt) { 1398 Dfmt = (Format >> DFMT_SHIFT) & DFMT_MASK; 1399 Nfmt = (Format >> NFMT_SHIFT) & NFMT_MASK; 1400 } 1401 1402 int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI) { 1403 if (isGFX11Plus(STI)) { 1404 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) { 1405 if (Name == UfmtSymbolicGFX11[Id]) 1406 return Id; 1407 } 1408 } else { 1409 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) { 1410 if (Name == UfmtSymbolicGFX10[Id]) 1411 return Id; 1412 } 1413 } 1414 return UFMT_UNDEF; 1415 } 1416 1417 StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI) { 1418 if(isValidUnifiedFormat(Id, STI)) 1419 return isGFX10(STI) ? UfmtSymbolicGFX10[Id] : UfmtSymbolicGFX11[Id]; 1420 return ""; 1421 } 1422 1423 bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI) { 1424 return isGFX10(STI) ? Id <= UfmtGFX10::UFMT_LAST : Id <= UfmtGFX11::UFMT_LAST; 1425 } 1426 1427 int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt, 1428 const MCSubtargetInfo &STI) { 1429 int64_t Fmt = encodeDfmtNfmt(Dfmt, Nfmt); 1430 if (isGFX11Plus(STI)) { 1431 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) { 1432 if (Fmt == DfmtNfmt2UFmtGFX11[Id]) 1433 return Id; 1434 } 1435 } else { 1436 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) { 1437 if (Fmt == DfmtNfmt2UFmtGFX10[Id]) 1438 return Id; 1439 } 1440 } 1441 return UFMT_UNDEF; 1442 } 1443 1444 bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI) { 1445 return isGFX10Plus(STI) ? (Val <= UFMT_MAX) : (Val <= DFMT_NFMT_MAX); 1446 } 1447 1448 unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI) { 1449 if (isGFX10Plus(STI)) 1450 return UFMT_DEFAULT; 1451 return DFMT_NFMT_DEFAULT; 1452 } 1453 1454 } // namespace MTBUFFormat 1455 1456 //===----------------------------------------------------------------------===// 1457 // SendMsg 1458 //===----------------------------------------------------------------------===// 1459 1460 namespace SendMsg { 1461 1462 static uint64_t getMsgIdMask(const MCSubtargetInfo &STI) { 1463 return isGFX11Plus(STI) ? ID_MASK_GFX11Plus_ : ID_MASK_PreGFX11_; 1464 } 1465 1466 int64_t getMsgId(const StringRef Name, const MCSubtargetInfo &STI) { 1467 int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Msg, MSG_SIZE, STI); 1468 return (Idx < 0) ? Idx : Msg[Idx].Encoding; 1469 } 1470 1471 bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI) { 1472 return (MsgId & ~(getMsgIdMask(STI))) == 0; 1473 } 1474 1475 StringRef getMsgName(int64_t MsgId, const MCSubtargetInfo &STI) { 1476 int Idx = getOprIdx<const MCSubtargetInfo &>(MsgId, Msg, MSG_SIZE, STI); 1477 return (Idx < 0) ? "" : Msg[Idx].Name; 1478 } 1479 1480 int64_t getMsgOpId(int64_t MsgId, const StringRef Name) { 1481 const char* const *S = (MsgId == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic; 1482 const int F = (MsgId == ID_SYSMSG) ? 
OP_SYS_FIRST_ : OP_GS_FIRST_; 1483 const int L = (MsgId == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_; 1484 for (int i = F; i < L; ++i) { 1485 if (Name == S[i]) { 1486 return i; 1487 } 1488 } 1489 return OP_UNKNOWN_; 1490 } 1491 1492 bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI, 1493 bool Strict) { 1494 assert(isValidMsgId(MsgId, STI)); 1495 1496 if (!Strict) 1497 return 0 <= OpId && isUInt<OP_WIDTH_>(OpId); 1498 1499 if (MsgId == ID_SYSMSG) 1500 return OP_SYS_FIRST_ <= OpId && OpId < OP_SYS_LAST_; 1501 if (!isGFX11Plus(STI)) { 1502 switch (MsgId) { 1503 case ID_GS_PreGFX11: 1504 return (OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_) && OpId != OP_GS_NOP; 1505 case ID_GS_DONE_PreGFX11: 1506 return OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_; 1507 } 1508 } 1509 return OpId == OP_NONE_; 1510 } 1511 1512 StringRef getMsgOpName(int64_t MsgId, int64_t OpId, 1513 const MCSubtargetInfo &STI) { 1514 assert(msgRequiresOp(MsgId, STI)); 1515 return (MsgId == ID_SYSMSG)? OpSysSymbolic[OpId] : OpGsSymbolic[OpId]; 1516 } 1517 1518 bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, 1519 const MCSubtargetInfo &STI, bool Strict) { 1520 assert(isValidMsgOp(MsgId, OpId, STI, Strict)); 1521 1522 if (!Strict) 1523 return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId); 1524 1525 if (!isGFX11Plus(STI)) { 1526 switch (MsgId) { 1527 case ID_GS_PreGFX11: 1528 return STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_; 1529 case ID_GS_DONE_PreGFX11: 1530 return (OpId == OP_GS_NOP) ? 1531 (StreamId == STREAM_ID_NONE_) : 1532 (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_); 1533 } 1534 } 1535 return StreamId == STREAM_ID_NONE_; 1536 } 1537 1538 bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI) { 1539 return MsgId == ID_SYSMSG || 1540 (!isGFX11Plus(STI) && 1541 (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11)); 1542 } 1543 1544 bool msgSupportsStream(int64_t MsgId, int64_t OpId, 1545 const MCSubtargetInfo &STI) { 1546 return !isGFX11Plus(STI) && 1547 (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11) && 1548 OpId != OP_GS_NOP; 1549 } 1550 1551 void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId, 1552 uint16_t &StreamId, const MCSubtargetInfo &STI) { 1553 MsgId = Val & getMsgIdMask(STI); 1554 if (isGFX11Plus(STI)) { 1555 OpId = 0; 1556 StreamId = 0; 1557 } else { 1558 OpId = (Val & OP_MASK_) >> OP_SHIFT_; 1559 StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_; 1560 } 1561 } 1562 1563 uint64_t encodeMsg(uint64_t MsgId, 1564 uint64_t OpId, 1565 uint64_t StreamId) { 1566 return MsgId | (OpId << OP_SHIFT_) | (StreamId << STREAM_ID_SHIFT_); 1567 } 1568 1569 } // namespace SendMsg 1570 1571 //===----------------------------------------------------------------------===// 1572 // 1573 //===----------------------------------------------------------------------===// 1574 1575 unsigned getInitialPSInputAddr(const Function &F) { 1576 return getIntegerAttribute(F, "InitialPSInputAddr", 0); 1577 } 1578 1579 bool getHasColorExport(const Function &F) { 1580 // As a safe default always respond as if PS has color exports. 1581 return getIntegerAttribute( 1582 F, "amdgpu-color-export", 1583 F.getCallingConv() == CallingConv::AMDGPU_PS ? 
1 : 0) != 0; 1584 } 1585 1586 bool getHasDepthExport(const Function &F) { 1587 return getIntegerAttribute(F, "amdgpu-depth-export", 0) != 0; 1588 } 1589 1590 bool isShader(CallingConv::ID cc) { 1591 switch(cc) { 1592 case CallingConv::AMDGPU_VS: 1593 case CallingConv::AMDGPU_LS: 1594 case CallingConv::AMDGPU_HS: 1595 case CallingConv::AMDGPU_ES: 1596 case CallingConv::AMDGPU_GS: 1597 case CallingConv::AMDGPU_PS: 1598 case CallingConv::AMDGPU_CS: 1599 return true; 1600 default: 1601 return false; 1602 } 1603 } 1604 1605 bool isGraphics(CallingConv::ID cc) { 1606 return isShader(cc) || cc == CallingConv::AMDGPU_Gfx; 1607 } 1608 1609 bool isCompute(CallingConv::ID cc) { 1610 return !isGraphics(cc) || cc == CallingConv::AMDGPU_CS; 1611 } 1612 1613 bool isEntryFunctionCC(CallingConv::ID CC) { 1614 switch (CC) { 1615 case CallingConv::AMDGPU_KERNEL: 1616 case CallingConv::SPIR_KERNEL: 1617 case CallingConv::AMDGPU_VS: 1618 case CallingConv::AMDGPU_GS: 1619 case CallingConv::AMDGPU_PS: 1620 case CallingConv::AMDGPU_CS: 1621 case CallingConv::AMDGPU_ES: 1622 case CallingConv::AMDGPU_HS: 1623 case CallingConv::AMDGPU_LS: 1624 return true; 1625 default: 1626 return false; 1627 } 1628 } 1629 1630 bool isModuleEntryFunctionCC(CallingConv::ID CC) { 1631 switch (CC) { 1632 case CallingConv::AMDGPU_Gfx: 1633 return true; 1634 default: 1635 return isEntryFunctionCC(CC); 1636 } 1637 } 1638 1639 bool isKernelCC(const Function *Func) { 1640 return AMDGPU::isModuleEntryFunctionCC(Func->getCallingConv()); 1641 } 1642 1643 bool hasXNACK(const MCSubtargetInfo &STI) { 1644 return STI.getFeatureBits()[AMDGPU::FeatureXNACK]; 1645 } 1646 1647 bool hasSRAMECC(const MCSubtargetInfo &STI) { 1648 return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC]; 1649 } 1650 1651 bool hasMIMG_R128(const MCSubtargetInfo &STI) { 1652 return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128] && !STI.getFeatureBits()[AMDGPU::FeatureR128A16]; 1653 } 1654 1655 bool hasGFX10A16(const MCSubtargetInfo &STI) { 1656 return STI.getFeatureBits()[AMDGPU::FeatureGFX10A16]; 1657 } 1658 1659 bool hasG16(const MCSubtargetInfo &STI) { 1660 return STI.getFeatureBits()[AMDGPU::FeatureG16]; 1661 } 1662 1663 bool hasPackedD16(const MCSubtargetInfo &STI) { 1664 return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem] && !isCI(STI) && 1665 !isSI(STI); 1666 } 1667 1668 bool isSI(const MCSubtargetInfo &STI) { 1669 return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands]; 1670 } 1671 1672 bool isCI(const MCSubtargetInfo &STI) { 1673 return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands]; 1674 } 1675 1676 bool isVI(const MCSubtargetInfo &STI) { 1677 return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]; 1678 } 1679 1680 bool isGFX9(const MCSubtargetInfo &STI) { 1681 return STI.getFeatureBits()[AMDGPU::FeatureGFX9]; 1682 } 1683 1684 bool isGFX9_GFX10(const MCSubtargetInfo &STI) { 1685 return isGFX9(STI) || isGFX10(STI); 1686 } 1687 1688 bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI) { 1689 return isVI(STI) || isGFX9(STI) || isGFX10(STI); 1690 } 1691 1692 bool isGFX8Plus(const MCSubtargetInfo &STI) { 1693 return isVI(STI) || isGFX9Plus(STI); 1694 } 1695 1696 bool isGFX9Plus(const MCSubtargetInfo &STI) { 1697 return isGFX9(STI) || isGFX10Plus(STI); 1698 } 1699 1700 bool isGFX10(const MCSubtargetInfo &STI) { 1701 return STI.getFeatureBits()[AMDGPU::FeatureGFX10]; 1702 } 1703 1704 bool isGFX10Plus(const MCSubtargetInfo &STI) { 1705 return isGFX10(STI) || isGFX11Plus(STI); 1706 } 1707 1708 bool isGFX11(const MCSubtargetInfo &STI) { 1709 return 
STI.getFeatureBits()[AMDGPU::FeatureGFX11]; 1710 } 1711 1712 bool isGFX11Plus(const MCSubtargetInfo &STI) { 1713 return isGFX11(STI); 1714 } 1715 1716 bool isNotGFX11Plus(const MCSubtargetInfo &STI) { 1717 return !isGFX11Plus(STI); 1718 } 1719 1720 bool isNotGFX10Plus(const MCSubtargetInfo &STI) { 1721 return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI); 1722 } 1723 1724 bool isGFX10Before1030(const MCSubtargetInfo &STI) { 1725 return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI); 1726 } 1727 1728 bool isGCN3Encoding(const MCSubtargetInfo &STI) { 1729 return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding]; 1730 } 1731 1732 bool isGFX10_AEncoding(const MCSubtargetInfo &STI) { 1733 return STI.getFeatureBits()[AMDGPU::FeatureGFX10_AEncoding]; 1734 } 1735 1736 bool isGFX10_BEncoding(const MCSubtargetInfo &STI) { 1737 return STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding]; 1738 } 1739 1740 bool hasGFX10_3Insts(const MCSubtargetInfo &STI) { 1741 return STI.getFeatureBits()[AMDGPU::FeatureGFX10_3Insts]; 1742 } 1743 1744 bool isGFX90A(const MCSubtargetInfo &STI) { 1745 return STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts]; 1746 } 1747 1748 bool isGFX940(const MCSubtargetInfo &STI) { 1749 return STI.getFeatureBits()[AMDGPU::FeatureGFX940Insts]; 1750 } 1751 1752 bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) { 1753 return STI.getFeatureBits()[AMDGPU::FeatureArchitectedFlatScratch]; 1754 } 1755 1756 bool hasMAIInsts(const MCSubtargetInfo &STI) { 1757 return STI.getFeatureBits()[AMDGPU::FeatureMAIInsts]; 1758 } 1759 1760 int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR, 1761 int32_t ArgNumVGPR) { 1762 if (has90AInsts && ArgNumAGPR) 1763 return alignTo(ArgNumVGPR, 4) + ArgNumAGPR; 1764 return std::max(ArgNumVGPR, ArgNumAGPR); 1765 } 1766 1767 bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) { 1768 const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID); 1769 const unsigned FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0); 1770 return SGPRClass.contains(FirstSubReg != 0 ? 
bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) {
  const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
  const unsigned FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0);
  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
    Reg == AMDGPU::SCC;
}

#define MAP_REG2REG \
  using namespace AMDGPU; \
  switch(Reg) { \
  default: return Reg; \
  CASE_CI_VI(FLAT_SCR) \
  CASE_CI_VI(FLAT_SCR_LO) \
  CASE_CI_VI(FLAT_SCR_HI) \
  CASE_VI_GFX9PLUS(TTMP0) \
  CASE_VI_GFX9PLUS(TTMP1) \
  CASE_VI_GFX9PLUS(TTMP2) \
  CASE_VI_GFX9PLUS(TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4) \
  CASE_VI_GFX9PLUS(TTMP5) \
  CASE_VI_GFX9PLUS(TTMP6) \
  CASE_VI_GFX9PLUS(TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8) \
  CASE_VI_GFX9PLUS(TTMP9) \
  CASE_VI_GFX9PLUS(TTMP10) \
  CASE_VI_GFX9PLUS(TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12) \
  CASE_VI_GFX9PLUS(TTMP13) \
  CASE_VI_GFX9PLUS(TTMP14) \
  CASE_VI_GFX9PLUS(TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
  CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
  CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
  CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
  CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_GFXPRE11_GFX11PLUS(M0) \
  CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
  }

#define CASE_CI_VI(node) \
  assert(!isSI(STI)); \
  case node: return isCI(STI) ? node##_ci : node##_vi;

#define CASE_VI_GFX9PLUS(node) \
  case node: return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;

#define CASE_GFXPRE11_GFX11PLUS(node) \
  case node: return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;
unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
  if (STI.getTargetTriple().getArch() == Triple::r600)
    return Reg;
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9PLUS
#undef CASE_GFXPRE11_GFX11PLUS

#define CASE_CI_VI(node) case node##_ci: case node##_vi: return node;
#define CASE_VI_GFX9PLUS(node) case node##_vi: case node##_gfx9plus: return node;
#define CASE_GFXPRE11_GFX11PLUS(node) case node##_gfx11plus: case node##_gfxpre11: return node;

unsigned mc2PseudoReg(unsigned Reg) {
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9PLUS
#undef CASE_GFXPRE11_GFX11PLUS
#undef MAP_REG2REG
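// For example, getMCReg(AMDGPU::FLAT_SCR, STI) yields FLAT_SCR_ci on CI and
// FLAT_SCR_vi on VI, and getMCReg(AMDGPU::TTMP0, STI) yields TTMP0_gfx9plus on
// GFX9 and later; mc2PseudoReg performs the inverse mapping back to the
// generation-independent pseudo register.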
bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}

bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  switch (OpType) {
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
    return true;
  default:
    return false;
  }
}

bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}

// Avoid using MCRegisterClass::getSize, since that function will go away
// (move from MC* level to Target* level). Return size in bits.
unsigned getRegBitWidth(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::VGPR_LO16RegClassID:
  case AMDGPU::VGPR_HI16RegClassID:
  case AMDGPU::SGPR_LO16RegClassID:
  case AMDGPU::AGPR_LO16RegClassID:
    return 16;
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VRegOrLds_32RegClassID:
  case AMDGPU::AGPR_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::AV_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
  case AMDGPU::SRegOrLds_32RegClassID:
    return 32;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
  case AMDGPU::AReg_64RegClassID:
  case AMDGPU::SReg_64_XEXECRegClassID:
  case AMDGPU::VReg_64_Align2RegClassID:
  case AMDGPU::AReg_64_Align2RegClassID:
  case AMDGPU::AV_64RegClassID:
  case AMDGPU::AV_64_Align2RegClassID:
    return 64;
  case AMDGPU::SGPR_96RegClassID:
  case AMDGPU::SReg_96RegClassID:
  case AMDGPU::VReg_96RegClassID:
  case AMDGPU::AReg_96RegClassID:
  case AMDGPU::VReg_96_Align2RegClassID:
  case AMDGPU::AReg_96_Align2RegClassID:
  case AMDGPU::AV_96RegClassID:
  case AMDGPU::AV_96_Align2RegClassID:
    return 96;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
  case AMDGPU::AReg_128RegClassID:
  case AMDGPU::VReg_128_Align2RegClassID:
  case AMDGPU::AReg_128_Align2RegClassID:
  case AMDGPU::AV_128RegClassID:
  case AMDGPU::AV_128_Align2RegClassID:
    return 128;
  case AMDGPU::SGPR_160RegClassID:
  case AMDGPU::SReg_160RegClassID:
  case AMDGPU::VReg_160RegClassID:
  case AMDGPU::AReg_160RegClassID:
  case AMDGPU::VReg_160_Align2RegClassID:
  case AMDGPU::AReg_160_Align2RegClassID:
  case AMDGPU::AV_160RegClassID:
  case AMDGPU::AV_160_Align2RegClassID:
    return 160;
  case AMDGPU::SGPR_192RegClassID:
  case AMDGPU::SReg_192RegClassID:
  case AMDGPU::VReg_192RegClassID:
  case AMDGPU::AReg_192RegClassID:
  case AMDGPU::VReg_192_Align2RegClassID:
  case AMDGPU::AReg_192_Align2RegClassID:
  case AMDGPU::AV_192RegClassID:
  case AMDGPU::AV_192_Align2RegClassID:
    return 192;
  case AMDGPU::SGPR_224RegClassID:
  case AMDGPU::SReg_224RegClassID:
  case AMDGPU::VReg_224RegClassID:
  case AMDGPU::AReg_224RegClassID:
  case AMDGPU::VReg_224_Align2RegClassID:
  case AMDGPU::AReg_224_Align2RegClassID:
  case AMDGPU::AV_224RegClassID:
  case AMDGPU::AV_224_Align2RegClassID:
    return 224;
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
  case AMDGPU::AReg_256RegClassID:
  case AMDGPU::VReg_256_Align2RegClassID:
  case AMDGPU::AReg_256_Align2RegClassID:
  case AMDGPU::AV_256RegClassID:
  case AMDGPU::AV_256_Align2RegClassID:
    return 256;
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
  case AMDGPU::AReg_512RegClassID:
  case AMDGPU::VReg_512_Align2RegClassID:
  case AMDGPU::AReg_512_Align2RegClassID:
  case AMDGPU::AV_512RegClassID:
  case AMDGPU::AV_512_Align2RegClassID:
    return 512;
  case AMDGPU::SGPR_1024RegClassID:
  case AMDGPU::SReg_1024RegClassID:
  case AMDGPU::VReg_1024RegClassID:
  case AMDGPU::AReg_1024RegClassID:
  case AMDGPU::VReg_1024_Align2RegClassID:
  case AMDGPU::AReg_1024_Align2RegClassID:
  case AMDGPU::AV_1024RegClassID:
  case AMDGPU::AV_1024_Align2RegClassID:
    return 1024;
  default:
    llvm_unreachable("Unexpected register class");
  }
}

unsigned getRegBitWidth(const MCRegisterClass &RC) {
  return getRegBitWidth(RC.getID());
}

unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                           unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
}

bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
  if (isInlinableIntLiteral(Literal))
    return true;

  uint64_t Val = static_cast<uint64_t>(Literal);
  return (Val == DoubleToBits(0.0)) ||
         (Val == DoubleToBits(1.0)) ||
         (Val == DoubleToBits(-1.0)) ||
         (Val == DoubleToBits(0.5)) ||
         (Val == DoubleToBits(-0.5)) ||
         (Val == DoubleToBits(2.0)) ||
         (Val == DoubleToBits(-2.0)) ||
         (Val == DoubleToBits(4.0)) ||
         (Val == DoubleToBits(-4.0)) ||
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
}

bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
  if (isInlinableIntLiteral(Literal))
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  uint32_t Val = static_cast<uint32_t>(Literal);
  return (Val == FloatToBits(0.0f)) ||
         (Val == FloatToBits(1.0f)) ||
         (Val == FloatToBits(-1.0f)) ||
         (Val == FloatToBits(0.5f)) ||
         (Val == FloatToBits(-0.5f)) ||
         (Val == FloatToBits(2.0f)) ||
         (Val == FloatToBits(-2.0f)) ||
         (Val == FloatToBits(4.0f)) ||
         (Val == FloatToBits(-4.0f)) ||
         (Val == 0x3e22f983 && HasInv2Pi);
}

bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;

  if (isInlinableIntLiteral(Literal))
    return true;

  uint16_t Val = static_cast<uint16_t>(Literal);
  return Val == 0x3C00 || // 1.0
         Val == 0xBC00 || // -1.0
         Val == 0x3800 || // 0.5
         Val == 0xB800 || // -0.5
         Val == 0x4000 || // 2.0
         Val == 0xC000 || // -2.0
         Val == 0x4400 || // 4.0
         Val == 0xC400 || // -4.0
         Val == 0x3118;   // 1/2pi
}

bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  if (isInt<16>(Literal) || isUInt<16>(Literal)) {
    int16_t Trunc = static_cast<int16_t>(Literal);
    return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi);
  }
  if (!(Literal & 0xffff))
    return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
}
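// For example, the packed v2f16 constant 0x3C003C00 (1.0, 1.0) is inlinable
// because both halves equal the inline half-precision encoding 0x3C00, and
// 0x00003C00 is inlinable because it is a valid 16-bit value on its own,
// whereas 0x40003C00 (2.0, 1.0) is not inlinable since its two halves differ.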
bool isInlinableIntLiteralV216(int32_t Literal) {
  int16_t Lo16 = static_cast<int16_t>(Literal);
  if (isInt<16>(Literal) || isUInt<16>(Literal))
    return isInlinableIntLiteral(Lo16);

  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  if (!(Literal & 0xffff))
    return isInlinableIntLiteral(Hi16);
  return Lo16 == Hi16 && isInlinableIntLiteral(Lo16);
}

bool isFoldableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  if (isInt<16>(Literal) || isUInt<16>(Literal))
    return true;

  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  if (!(Literal & 0xffff))
    return true;
  return Lo16 == Hi16;
}

bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_Gfx:
    // For non-compute shaders, SGPR inputs are marked with either inreg or
    // byval. Everything else is in VGPRs.
    return F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::InReg) ||
           F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
  return isGCN3Encoding(ST) || isGFX10Plus(ST);
}

static bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST) {
  return isGFX9Plus(ST);
}

bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
                                      int64_t EncodedOffset) {
  return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset)
                               : isUInt<8>(EncodedOffset);
}

bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST,
                                    int64_t EncodedOffset,
                                    bool IsBuffer) {
  return !IsBuffer &&
         hasSMRDSignedImmOffset(ST) &&
         isInt<21>(EncodedOffset);
}

static bool isDwordAligned(uint64_t ByteOffset) {
  return (ByteOffset & 3) == 0;
}

uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
                                uint64_t ByteOffset) {
  if (hasSMEMByteOffset(ST))
    return ByteOffset;

  assert(isDwordAligned(ByteOffset));
  return ByteOffset >> 2;
}

Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
                                       int64_t ByteOffset, bool IsBuffer) {
  // The signed version is always a byte offset.
  if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
    assert(hasSMEMByteOffset(ST));
    return isInt<20>(ByteOffset) ? Optional<int64_t>(ByteOffset) : None;
  }

  if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
    return None;

  int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
  return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
             ? Optional<int64_t>(EncodedOffset)
             : None;
}
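// For example, on SI the SMRD offset field holds an unsigned dword count, so a
// byte offset of 1020 encodes as 255 and fits the 8-bit field, while 1024
// (256 dwords) does not; on targets with hasSMEMByteOffset the byte offset is
// encoded directly and checked against the wider limits above.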
Optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
                                                int64_t ByteOffset) {
  if (!isCI(ST) || !isDwordAligned(ByteOffset))
    return None;

  int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
  return isUInt<32>(EncodedOffset) ? Optional<int64_t>(EncodedOffset) : None;
}

unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST, bool Signed) {
  // Address offset is 12-bit signed for GFX10, 13-bit for GFX9 and GFX11+.
  if (AMDGPU::isGFX10(ST))
    return Signed ? 12 : 11;

  return Signed ? 13 : 12;
}

// Given Imm, split it into the values to put into the SOffset and ImmOffset
// fields in an MUBUF instruction. Return false if it is not possible (due to a
// hardware bug needing a workaround).
//
// The required alignment ensures that individual address components remain
// aligned if they are aligned to begin with. It also ensures that additional
// offsets within the given alignment can be added to the resulting ImmOffset.
bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset,
                      const GCNSubtarget *Subtarget, Align Alignment) {
  const uint32_t MaxImm = alignDown(4095, Alignment.value());
  uint32_t Overflow = 0;

  if (Imm > MaxImm) {
    if (Imm <= MaxImm + 64) {
      // Use an SOffset inline constant for 4..64
      Overflow = Imm - MaxImm;
      Imm = MaxImm;
    } else {
      // Try to keep the same value in SOffset for adjacent loads, so that
      // the corresponding register contents can be re-used.
      //
      // Load values with all low-bits (except for alignment bits) set into
      // SOffset, so that a larger range of values can be covered using
      // s_movk_i32.
      //
      // Atomic operations fail to work correctly when individual address
      // components are unaligned, even if their sum is aligned.
      uint32_t High = (Imm + Alignment.value()) & ~4095;
      uint32_t Low = (Imm + Alignment.value()) & 4095;
      Imm = Low;
      Overflow = High - Alignment.value();
    }
  }

  // There is a hardware bug in SI and CI which prevents address clamping in
  // MUBUF instructions from working correctly with SOffsets. The immediate
  // offset is unaffected.
  if (Overflow > 0 &&
      Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
    return false;

  ImmOffset = Imm;
  SOffset = Overflow;
  return true;
}
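// Worked example for splitMUBUFOffset: for Imm = 5000 with Align(4),
// MaxImm = alignDown(4095, 4) = 4092, so the else branch computes
// High = (5004 & ~4095) = 4096 and Low = (5004 & 4095) = 908, giving
// ImmOffset = 908 and SOffset = 4092 (their sum is the original 5000).
// On SI/CI the non-zero SOffset makes the split fail because of the
// address-clamping hardware bug noted above.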
SIModeRegisterDefaults::SIModeRegisterDefaults(const Function &F) {
  *this = getDefaultForCallingConv(F.getCallingConv());

  StringRef IEEEAttr = F.getFnAttribute("amdgpu-ieee").getValueAsString();
  if (!IEEEAttr.empty())
    IEEE = IEEEAttr == "true";

  StringRef DX10ClampAttr
    = F.getFnAttribute("amdgpu-dx10-clamp").getValueAsString();
  if (!DX10ClampAttr.empty())
    DX10Clamp = DX10ClampAttr == "true";

  StringRef DenormF32Attr =
      F.getFnAttribute("denormal-fp-math-f32").getValueAsString();
  if (!DenormF32Attr.empty()) {
    DenormalMode DenormMode = parseDenormalFPAttribute(DenormF32Attr);
    FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE;
    FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
  }

  StringRef DenormAttr =
      F.getFnAttribute("denormal-fp-math").getValueAsString();
  if (!DenormAttr.empty()) {
    DenormalMode DenormMode = parseDenormalFPAttribute(DenormAttr);

    if (DenormF32Attr.empty()) {
      FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE;
      FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
    }

    FP64FP16InputDenormals = DenormMode.Input == DenormalMode::IEEE;
    FP64FP16OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
  }
}

namespace {

struct SourceOfDivergence {
  unsigned Intr;
};
const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);

#define GET_SourcesOfDivergence_IMPL
#define GET_Gfx9BufferFormat_IMPL
#define GET_Gfx10BufferFormat_IMPL
#define GET_Gfx11PlusBufferFormat_IMPL
#include "AMDGPUGenSearchableTables.inc"

} // end anonymous namespace

bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
  return lookupSourceOfDivergence(IntrID);
}

const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
                                                  uint8_t NumComponents,
                                                  uint8_t NumFormat,
                                                  const MCSubtargetInfo &STI) {
  return isGFX11Plus(STI)
             ? getGfx11PlusBufferFormatInfo(BitsPerComp, NumComponents,
                                            NumFormat)
             : isGFX10(STI) ? getGfx10BufferFormatInfo(BitsPerComp,
                                                       NumComponents, NumFormat)
                            : getGfx9BufferFormatInfo(BitsPerComp,
                                                      NumComponents, NumFormat);
}

const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format,
                                                  const MCSubtargetInfo &STI) {
  return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(Format)
         : isGFX10(STI)   ? getGfx10BufferFormatInfo(Format)
                          : getGfx9BufferFormatInfo(Format);
}

} // namespace AMDGPU

raw_ostream &operator<<(raw_ostream &OS,
                        const AMDGPU::IsaInfo::TargetIDSetting S) {
  switch (S) {
  case (AMDGPU::IsaInfo::TargetIDSetting::Unsupported):
    OS << "Unsupported";
    break;
  case (AMDGPU::IsaInfo::TargetIDSetting::Any):
    OS << "Any";
    break;
  case (AMDGPU::IsaInfo::TargetIDSetting::Off):
    OS << "Off";
    break;
  case (AMDGPU::IsaInfo::TargetIDSetting::On):
    OS << "On";
    break;
  }
  return OS;
}

} // namespace llvm