1 //===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #include "AMDGPUBaseInfo.h" 10 #include "AMDGPU.h" 11 #include "AMDGPUAsmUtils.h" 12 #include "AMDKernelCodeT.h" 13 #include "GCNSubtarget.h" 14 #include "MCTargetDesc/AMDGPUMCTargetDesc.h" 15 #include "llvm/BinaryFormat/ELF.h" 16 #include "llvm/IR/Attributes.h" 17 #include "llvm/IR/Function.h" 18 #include "llvm/IR/GlobalValue.h" 19 #include "llvm/IR/IntrinsicsAMDGPU.h" 20 #include "llvm/IR/IntrinsicsR600.h" 21 #include "llvm/IR/LLVMContext.h" 22 #include "llvm/MC/MCSubtargetInfo.h" 23 #include "llvm/Support/AMDHSAKernelDescriptor.h" 24 #include "llvm/Support/CommandLine.h" 25 #include "llvm/Support/TargetParser.h" 26 27 #define GET_INSTRINFO_NAMED_OPS 28 #define GET_INSTRMAP_INFO 29 #include "AMDGPUGenInstrInfo.inc" 30 31 static llvm::cl::opt<unsigned> AmdhsaCodeObjectVersion( 32 "amdhsa-code-object-version", llvm::cl::Hidden, 33 llvm::cl::desc("AMDHSA Code Object Version"), llvm::cl::init(4), 34 llvm::cl::ZeroOrMore); 35 36 namespace { 37 38 /// \returns Bit mask for given bit \p Shift and bit \p Width. 39 unsigned getBitMask(unsigned Shift, unsigned Width) { 40 return ((1 << Width) - 1) << Shift; 41 } 42 43 /// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width. 44 /// 45 /// \returns Packed \p Dst. 46 unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) { 47 Dst &= ~(1 << Shift) & ~getBitMask(Shift, Width); 48 Dst |= (Src << Shift) & getBitMask(Shift, Width); 49 return Dst; 50 } 51 52 /// Unpacks bits from \p Src for given bit \p Shift and bit \p Width. 53 /// 54 /// \returns Unpacked bits. 55 unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) { 56 return (Src & getBitMask(Shift, Width)) >> Shift; 57 } 58 59 /// \returns Vmcnt bit shift (lower bits). 60 unsigned getVmcntBitShiftLo() { return 0; } 61 62 /// \returns Vmcnt bit width (lower bits). 63 unsigned getVmcntBitWidthLo() { return 4; } 64 65 /// \returns Expcnt bit shift. 66 unsigned getExpcntBitShift() { return 4; } 67 68 /// \returns Expcnt bit width. 69 unsigned getExpcntBitWidth() { return 3; } 70 71 /// \returns Lgkmcnt bit shift. 72 unsigned getLgkmcntBitShift() { return 8; } 73 74 /// \returns Lgkmcnt bit width. 75 unsigned getLgkmcntBitWidth(unsigned VersionMajor) { 76 return (VersionMajor >= 10) ? 6 : 4; 77 } 78 79 /// \returns Vmcnt bit shift (higher bits). 80 unsigned getVmcntBitShiftHi() { return 14; } 81 82 /// \returns Vmcnt bit width (higher bits). 83 unsigned getVmcntBitWidthHi() { return 2; } 84 85 } // end namespace anonymous 86 87 namespace llvm { 88 89 namespace AMDGPU { 90 91 Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) { 92 if (STI && STI->getTargetTriple().getOS() != Triple::AMDHSA) 93 return None; 94 95 switch (AmdhsaCodeObjectVersion) { 96 case 2: 97 return ELF::ELFABIVERSION_AMDGPU_HSA_V2; 98 case 3: 99 return ELF::ELFABIVERSION_AMDGPU_HSA_V3; 100 case 4: 101 return ELF::ELFABIVERSION_AMDGPU_HSA_V4; 102 case 5: 103 return ELF::ELFABIVERSION_AMDGPU_HSA_V5; 104 default: 105 report_fatal_error(Twine("Unsupported AMDHSA Code Object Version ") + 106 Twine(AmdhsaCodeObjectVersion)); 107 } 108 } 109 110 bool isHsaAbiVersion2(const MCSubtargetInfo *STI) { 111 if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI)) 112 return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V2; 113 return false; 114 } 115 116 bool isHsaAbiVersion3(const MCSubtargetInfo *STI) { 117 if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI)) 118 return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V3; 119 return false; 120 } 121 122 bool isHsaAbiVersion4(const MCSubtargetInfo *STI) { 123 if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI)) 124 return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V4; 125 return false; 126 } 127 128 bool isHsaAbiVersion5(const MCSubtargetInfo *STI) { 129 if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI)) 130 return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V5; 131 return false; 132 } 133 134 bool isHsaAbiVersion3AndAbove(const MCSubtargetInfo *STI) { 135 return isHsaAbiVersion3(STI) || isHsaAbiVersion4(STI) || 136 isHsaAbiVersion5(STI); 137 } 138 139 unsigned getAmdhsaCodeObjectVersion() { 140 return AmdhsaCodeObjectVersion; 141 } 142 143 unsigned getMultigridSyncArgImplicitArgPosition() { 144 switch (AmdhsaCodeObjectVersion) { 145 case 2: 146 case 3: 147 case 4: 148 return 48; 149 case 5: 150 return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET; 151 default: 152 llvm_unreachable("Unexpected code object version"); 153 return 0; 154 } 155 } 156 157 158 // FIXME: All such magic numbers about the ABI should be in a 159 // central TD file. 160 unsigned getHostcallImplicitArgPosition() { 161 switch (AmdhsaCodeObjectVersion) { 162 case 2: 163 case 3: 164 case 4: 165 return 24; 166 case 5: 167 return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET; 168 default: 169 llvm_unreachable("Unexpected code object version"); 170 return 0; 171 } 172 } 173 174 #define GET_MIMGBaseOpcodesTable_IMPL 175 #define GET_MIMGDimInfoTable_IMPL 176 #define GET_MIMGInfoTable_IMPL 177 #define GET_MIMGLZMappingTable_IMPL 178 #define GET_MIMGMIPMappingTable_IMPL 179 #define GET_MIMGBiasMappingTable_IMPL 180 #define GET_MIMGOffsetMappingTable_IMPL 181 #define GET_MIMGG16MappingTable_IMPL 182 #define GET_MAIInstInfoTable_IMPL 183 #include "AMDGPUGenSearchableTables.inc" 184 185 int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding, 186 unsigned VDataDwords, unsigned VAddrDwords) { 187 const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding, 188 VDataDwords, VAddrDwords); 189 return Info ? Info->Opcode : -1; 190 } 191 192 const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) { 193 const MIMGInfo *Info = getMIMGInfo(Opc); 194 return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr; 195 } 196 197 int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) { 198 const MIMGInfo *OrigInfo = getMIMGInfo(Opc); 199 const MIMGInfo *NewInfo = 200 getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding, 201 NewChannels, OrigInfo->VAddrDwords); 202 return NewInfo ? NewInfo->Opcode : -1; 203 } 204 205 unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, 206 const MIMGDimInfo *Dim, bool IsA16, 207 bool IsG16Supported) { 208 unsigned AddrWords = BaseOpcode->NumExtraArgs; 209 unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) + 210 (BaseOpcode->LodOrClampOrMip ? 1 : 0); 211 if (IsA16) 212 AddrWords += divideCeil(AddrComponents, 2); 213 else 214 AddrWords += AddrComponents; 215 216 // Note: For subtargets that support A16 but not G16, enabling A16 also 217 // enables 16 bit gradients. 218 // For subtargets that support A16 (operand) and G16 (done with a different 219 // instruction encoding), they are independent. 220 221 if (BaseOpcode->Gradients) { 222 if ((IsA16 && !IsG16Supported) || BaseOpcode->G16) 223 // There are two gradients per coordinate, we pack them separately. 224 // For the 3d case, 225 // we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv) 226 AddrWords += alignTo<2>(Dim->NumGradients / 2); 227 else 228 AddrWords += Dim->NumGradients; 229 } 230 return AddrWords; 231 } 232 233 struct MUBUFInfo { 234 uint16_t Opcode; 235 uint16_t BaseOpcode; 236 uint8_t elements; 237 bool has_vaddr; 238 bool has_srsrc; 239 bool has_soffset; 240 bool IsBufferInv; 241 }; 242 243 struct MTBUFInfo { 244 uint16_t Opcode; 245 uint16_t BaseOpcode; 246 uint8_t elements; 247 bool has_vaddr; 248 bool has_srsrc; 249 bool has_soffset; 250 }; 251 252 struct SMInfo { 253 uint16_t Opcode; 254 bool IsBuffer; 255 }; 256 257 struct VOPInfo { 258 uint16_t Opcode; 259 bool IsSingle; 260 }; 261 262 #define GET_MTBUFInfoTable_DECL 263 #define GET_MTBUFInfoTable_IMPL 264 #define GET_MUBUFInfoTable_DECL 265 #define GET_MUBUFInfoTable_IMPL 266 #define GET_SMInfoTable_DECL 267 #define GET_SMInfoTable_IMPL 268 #define GET_VOP1InfoTable_DECL 269 #define GET_VOP1InfoTable_IMPL 270 #define GET_VOP2InfoTable_DECL 271 #define GET_VOP2InfoTable_IMPL 272 #define GET_VOP3InfoTable_DECL 273 #define GET_VOP3InfoTable_IMPL 274 #include "AMDGPUGenSearchableTables.inc" 275 276 int getMTBUFBaseOpcode(unsigned Opc) { 277 const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc); 278 return Info ? Info->BaseOpcode : -1; 279 } 280 281 int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) { 282 const MTBUFInfo *Info = getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements); 283 return Info ? Info->Opcode : -1; 284 } 285 286 int getMTBUFElements(unsigned Opc) { 287 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc); 288 return Info ? Info->elements : 0; 289 } 290 291 bool getMTBUFHasVAddr(unsigned Opc) { 292 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc); 293 return Info ? Info->has_vaddr : false; 294 } 295 296 bool getMTBUFHasSrsrc(unsigned Opc) { 297 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc); 298 return Info ? Info->has_srsrc : false; 299 } 300 301 bool getMTBUFHasSoffset(unsigned Opc) { 302 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc); 303 return Info ? Info->has_soffset : false; 304 } 305 306 int getMUBUFBaseOpcode(unsigned Opc) { 307 const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc); 308 return Info ? Info->BaseOpcode : -1; 309 } 310 311 int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) { 312 const MUBUFInfo *Info = getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements); 313 return Info ? Info->Opcode : -1; 314 } 315 316 int getMUBUFElements(unsigned Opc) { 317 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc); 318 return Info ? Info->elements : 0; 319 } 320 321 bool getMUBUFHasVAddr(unsigned Opc) { 322 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc); 323 return Info ? Info->has_vaddr : false; 324 } 325 326 bool getMUBUFHasSrsrc(unsigned Opc) { 327 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc); 328 return Info ? Info->has_srsrc : false; 329 } 330 331 bool getMUBUFHasSoffset(unsigned Opc) { 332 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc); 333 return Info ? Info->has_soffset : false; 334 } 335 336 bool getMUBUFIsBufferInv(unsigned Opc) { 337 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc); 338 return Info ? Info->IsBufferInv : false; 339 } 340 341 bool getSMEMIsBuffer(unsigned Opc) { 342 const SMInfo *Info = getSMEMOpcodeHelper(Opc); 343 return Info ? Info->IsBuffer : false; 344 } 345 346 bool getVOP1IsSingle(unsigned Opc) { 347 const VOPInfo *Info = getVOP1OpcodeHelper(Opc); 348 return Info ? Info->IsSingle : false; 349 } 350 351 bool getVOP2IsSingle(unsigned Opc) { 352 const VOPInfo *Info = getVOP2OpcodeHelper(Opc); 353 return Info ? Info->IsSingle : false; 354 } 355 356 bool getVOP3IsSingle(unsigned Opc) { 357 const VOPInfo *Info = getVOP3OpcodeHelper(Opc); 358 return Info ? Info->IsSingle : false; 359 } 360 361 bool getMAIIsDGEMM(unsigned Opc) { 362 const MAIInstInfo *Info = getMAIInstInfoHelper(Opc); 363 return Info ? Info->is_dgemm : false; 364 } 365 366 bool getMAIIsGFX940XDL(unsigned Opc) { 367 const MAIInstInfo *Info = getMAIInstInfoHelper(Opc); 368 return Info ? Info->is_gfx940_xdl : false; 369 } 370 371 // Wrapper for Tablegen'd function. enum Subtarget is not defined in any 372 // header files, so we need to wrap it in a function that takes unsigned 373 // instead. 374 int getMCOpcode(uint16_t Opcode, unsigned Gen) { 375 return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen)); 376 } 377 378 namespace IsaInfo { 379 380 AMDGPUTargetID::AMDGPUTargetID(const MCSubtargetInfo &STI) 381 : STI(STI), XnackSetting(TargetIDSetting::Any), 382 SramEccSetting(TargetIDSetting::Any) { 383 if (!STI.getFeatureBits().test(FeatureSupportsXNACK)) 384 XnackSetting = TargetIDSetting::Unsupported; 385 if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC)) 386 SramEccSetting = TargetIDSetting::Unsupported; 387 } 388 389 void AMDGPUTargetID::setTargetIDFromFeaturesString(StringRef FS) { 390 // Check if xnack or sramecc is explicitly enabled or disabled. In the 391 // absence of the target features we assume we must generate code that can run 392 // in any environment. 393 SubtargetFeatures Features(FS); 394 Optional<bool> XnackRequested; 395 Optional<bool> SramEccRequested; 396 397 for (const std::string &Feature : Features.getFeatures()) { 398 if (Feature == "+xnack") 399 XnackRequested = true; 400 else if (Feature == "-xnack") 401 XnackRequested = false; 402 else if (Feature == "+sramecc") 403 SramEccRequested = true; 404 else if (Feature == "-sramecc") 405 SramEccRequested = false; 406 } 407 408 bool XnackSupported = isXnackSupported(); 409 bool SramEccSupported = isSramEccSupported(); 410 411 if (XnackRequested) { 412 if (XnackSupported) { 413 XnackSetting = 414 *XnackRequested ? TargetIDSetting::On : TargetIDSetting::Off; 415 } else { 416 // If a specific xnack setting was requested and this GPU does not support 417 // xnack emit a warning. Setting will remain set to "Unsupported". 418 if (*XnackRequested) { 419 errs() << "warning: xnack 'On' was requested for a processor that does " 420 "not support it!\n"; 421 } else { 422 errs() << "warning: xnack 'Off' was requested for a processor that " 423 "does not support it!\n"; 424 } 425 } 426 } 427 428 if (SramEccRequested) { 429 if (SramEccSupported) { 430 SramEccSetting = 431 *SramEccRequested ? TargetIDSetting::On : TargetIDSetting::Off; 432 } else { 433 // If a specific sramecc setting was requested and this GPU does not 434 // support sramecc emit a warning. Setting will remain set to 435 // "Unsupported". 436 if (*SramEccRequested) { 437 errs() << "warning: sramecc 'On' was requested for a processor that " 438 "does not support it!\n"; 439 } else { 440 errs() << "warning: sramecc 'Off' was requested for a processor that " 441 "does not support it!\n"; 442 } 443 } 444 } 445 } 446 447 static TargetIDSetting 448 getTargetIDSettingFromFeatureString(StringRef FeatureString) { 449 if (FeatureString.endswith("-")) 450 return TargetIDSetting::Off; 451 if (FeatureString.endswith("+")) 452 return TargetIDSetting::On; 453 454 llvm_unreachable("Malformed feature string"); 455 } 456 457 void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) { 458 SmallVector<StringRef, 3> TargetIDSplit; 459 TargetID.split(TargetIDSplit, ':'); 460 461 for (const auto &FeatureString : TargetIDSplit) { 462 if (FeatureString.startswith("xnack")) 463 XnackSetting = getTargetIDSettingFromFeatureString(FeatureString); 464 if (FeatureString.startswith("sramecc")) 465 SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString); 466 } 467 } 468 469 std::string AMDGPUTargetID::toString() const { 470 std::string StringRep; 471 raw_string_ostream StreamRep(StringRep); 472 473 auto TargetTriple = STI.getTargetTriple(); 474 auto Version = getIsaVersion(STI.getCPU()); 475 476 StreamRep << TargetTriple.getArchName() << '-' 477 << TargetTriple.getVendorName() << '-' 478 << TargetTriple.getOSName() << '-' 479 << TargetTriple.getEnvironmentName() << '-'; 480 481 std::string Processor; 482 // TODO: Following else statement is present here because we used various 483 // alias names for GPUs up until GFX9 (e.g. 'fiji' is same as 'gfx803'). 484 // Remove once all aliases are removed from GCNProcessors.td. 485 if (Version.Major >= 9) 486 Processor = STI.getCPU().str(); 487 else 488 Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) + 489 Twine(Version.Stepping)) 490 .str(); 491 492 std::string Features; 493 if (Optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) { 494 switch (*HsaAbiVersion) { 495 case ELF::ELFABIVERSION_AMDGPU_HSA_V2: 496 // Code object V2 only supported specific processors and had fixed 497 // settings for the XNACK. 498 if (Processor == "gfx600") { 499 } else if (Processor == "gfx601") { 500 } else if (Processor == "gfx602") { 501 } else if (Processor == "gfx700") { 502 } else if (Processor == "gfx701") { 503 } else if (Processor == "gfx702") { 504 } else if (Processor == "gfx703") { 505 } else if (Processor == "gfx704") { 506 } else if (Processor == "gfx705") { 507 } else if (Processor == "gfx801") { 508 if (!isXnackOnOrAny()) 509 report_fatal_error( 510 "AMD GPU code object V2 does not support processor " + 511 Twine(Processor) + " without XNACK"); 512 } else if (Processor == "gfx802") { 513 } else if (Processor == "gfx803") { 514 } else if (Processor == "gfx805") { 515 } else if (Processor == "gfx810") { 516 if (!isXnackOnOrAny()) 517 report_fatal_error( 518 "AMD GPU code object V2 does not support processor " + 519 Twine(Processor) + " without XNACK"); 520 } else if (Processor == "gfx900") { 521 if (isXnackOnOrAny()) 522 Processor = "gfx901"; 523 } else if (Processor == "gfx902") { 524 if (isXnackOnOrAny()) 525 Processor = "gfx903"; 526 } else if (Processor == "gfx904") { 527 if (isXnackOnOrAny()) 528 Processor = "gfx905"; 529 } else if (Processor == "gfx906") { 530 if (isXnackOnOrAny()) 531 Processor = "gfx907"; 532 } else if (Processor == "gfx90c") { 533 if (isXnackOnOrAny()) 534 report_fatal_error( 535 "AMD GPU code object V2 does not support processor " + 536 Twine(Processor) + " with XNACK being ON or ANY"); 537 } else { 538 report_fatal_error( 539 "AMD GPU code object V2 does not support processor " + 540 Twine(Processor)); 541 } 542 break; 543 case ELF::ELFABIVERSION_AMDGPU_HSA_V3: 544 // xnack. 545 if (isXnackOnOrAny()) 546 Features += "+xnack"; 547 // In code object v2 and v3, "sramecc" feature was spelled with a 548 // hyphen ("sram-ecc"). 549 if (isSramEccOnOrAny()) 550 Features += "+sram-ecc"; 551 break; 552 case ELF::ELFABIVERSION_AMDGPU_HSA_V4: 553 case ELF::ELFABIVERSION_AMDGPU_HSA_V5: 554 // sramecc. 555 if (getSramEccSetting() == TargetIDSetting::Off) 556 Features += ":sramecc-"; 557 else if (getSramEccSetting() == TargetIDSetting::On) 558 Features += ":sramecc+"; 559 // xnack. 560 if (getXnackSetting() == TargetIDSetting::Off) 561 Features += ":xnack-"; 562 else if (getXnackSetting() == TargetIDSetting::On) 563 Features += ":xnack+"; 564 break; 565 default: 566 break; 567 } 568 } 569 570 StreamRep << Processor << Features; 571 572 StreamRep.flush(); 573 return StringRep; 574 } 575 576 unsigned getWavefrontSize(const MCSubtargetInfo *STI) { 577 if (STI->getFeatureBits().test(FeatureWavefrontSize16)) 578 return 16; 579 if (STI->getFeatureBits().test(FeatureWavefrontSize32)) 580 return 32; 581 582 return 64; 583 } 584 585 unsigned getLocalMemorySize(const MCSubtargetInfo *STI) { 586 if (STI->getFeatureBits().test(FeatureLocalMemorySize32768)) 587 return 32768; 588 if (STI->getFeatureBits().test(FeatureLocalMemorySize65536)) 589 return 65536; 590 591 return 0; 592 } 593 594 unsigned getEUsPerCU(const MCSubtargetInfo *STI) { 595 // "Per CU" really means "per whatever functional block the waves of a 596 // workgroup must share". For gfx10 in CU mode this is the CU, which contains 597 // two SIMDs. 598 if (isGFX10Plus(*STI) && STI->getFeatureBits().test(FeatureCuMode)) 599 return 2; 600 // Pre-gfx10 a CU contains four SIMDs. For gfx10 in WGP mode the WGP contains 601 // two CUs, so a total of four SIMDs. 602 return 4; 603 } 604 605 unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI, 606 unsigned FlatWorkGroupSize) { 607 assert(FlatWorkGroupSize != 0); 608 if (STI->getTargetTriple().getArch() != Triple::amdgcn) 609 return 8; 610 unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize); 611 if (N == 1) 612 return 40; 613 N = 40 / N; 614 return std::min(N, 16u); 615 } 616 617 unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) { 618 return 1; 619 } 620 621 unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) { 622 // FIXME: Need to take scratch memory into account. 623 if (isGFX90A(*STI)) 624 return 8; 625 if (!isGFX10Plus(*STI)) 626 return 10; 627 return hasGFX10_3Insts(*STI) ? 16 : 20; 628 } 629 630 unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI, 631 unsigned FlatWorkGroupSize) { 632 return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize), 633 getEUsPerCU(STI)); 634 } 635 636 unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) { 637 return 1; 638 } 639 640 unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) { 641 // Some subtargets allow encoding 2048, but this isn't tested or supported. 642 return 1024; 643 } 644 645 unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI, 646 unsigned FlatWorkGroupSize) { 647 return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI)); 648 } 649 650 unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) { 651 IsaVersion Version = getIsaVersion(STI->getCPU()); 652 if (Version.Major >= 10) 653 return getAddressableNumSGPRs(STI); 654 if (Version.Major >= 8) 655 return 16; 656 return 8; 657 } 658 659 unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) { 660 return 8; 661 } 662 663 unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) { 664 IsaVersion Version = getIsaVersion(STI->getCPU()); 665 if (Version.Major >= 8) 666 return 800; 667 return 512; 668 } 669 670 unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) { 671 if (STI->getFeatureBits().test(FeatureSGPRInitBug)) 672 return FIXED_NUM_SGPRS_FOR_INIT_BUG; 673 674 IsaVersion Version = getIsaVersion(STI->getCPU()); 675 if (Version.Major >= 10) 676 return 106; 677 if (Version.Major >= 8) 678 return 102; 679 return 104; 680 } 681 682 unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) { 683 assert(WavesPerEU != 0); 684 685 IsaVersion Version = getIsaVersion(STI->getCPU()); 686 if (Version.Major >= 10) 687 return 0; 688 689 if (WavesPerEU >= getMaxWavesPerEU(STI)) 690 return 0; 691 692 unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1); 693 if (STI->getFeatureBits().test(FeatureTrapHandler)) 694 MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS); 695 MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1; 696 return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI)); 697 } 698 699 unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, 700 bool Addressable) { 701 assert(WavesPerEU != 0); 702 703 unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI); 704 IsaVersion Version = getIsaVersion(STI->getCPU()); 705 if (Version.Major >= 10) 706 return Addressable ? AddressableNumSGPRs : 108; 707 if (Version.Major >= 8 && !Addressable) 708 AddressableNumSGPRs = 112; 709 unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU; 710 if (STI->getFeatureBits().test(FeatureTrapHandler)) 711 MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS); 712 MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI)); 713 return std::min(MaxNumSGPRs, AddressableNumSGPRs); 714 } 715 716 unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed, 717 bool FlatScrUsed, bool XNACKUsed) { 718 unsigned ExtraSGPRs = 0; 719 if (VCCUsed) 720 ExtraSGPRs = 2; 721 722 IsaVersion Version = getIsaVersion(STI->getCPU()); 723 if (Version.Major >= 10) 724 return ExtraSGPRs; 725 726 if (Version.Major < 8) { 727 if (FlatScrUsed) 728 ExtraSGPRs = 4; 729 } else { 730 if (XNACKUsed) 731 ExtraSGPRs = 4; 732 733 if (FlatScrUsed || 734 STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch)) 735 ExtraSGPRs = 6; 736 } 737 738 return ExtraSGPRs; 739 } 740 741 unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed, 742 bool FlatScrUsed) { 743 return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed, 744 STI->getFeatureBits().test(AMDGPU::FeatureXNACK)); 745 } 746 747 unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) { 748 NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(STI)); 749 // SGPRBlocks is actual number of SGPR blocks minus 1. 750 return NumSGPRs / getSGPREncodingGranule(STI) - 1; 751 } 752 753 unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI, 754 Optional<bool> EnableWavefrontSize32) { 755 if (STI->getFeatureBits().test(FeatureGFX90AInsts)) 756 return 8; 757 758 bool IsWave32 = EnableWavefrontSize32 ? 759 *EnableWavefrontSize32 : 760 STI->getFeatureBits().test(FeatureWavefrontSize32); 761 762 if (hasGFX10_3Insts(*STI)) 763 return IsWave32 ? 16 : 8; 764 765 return IsWave32 ? 8 : 4; 766 } 767 768 unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI, 769 Optional<bool> EnableWavefrontSize32) { 770 if (STI->getFeatureBits().test(FeatureGFX90AInsts)) 771 return 8; 772 773 bool IsWave32 = EnableWavefrontSize32 ? 774 *EnableWavefrontSize32 : 775 STI->getFeatureBits().test(FeatureWavefrontSize32); 776 777 return IsWave32 ? 8 : 4; 778 } 779 780 unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) { 781 if (STI->getFeatureBits().test(FeatureGFX90AInsts)) 782 return 512; 783 if (!isGFX10Plus(*STI)) 784 return 256; 785 return STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1024 : 512; 786 } 787 788 unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI) { 789 if (STI->getFeatureBits().test(FeatureGFX90AInsts)) 790 return 512; 791 return 256; 792 } 793 794 unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) { 795 assert(WavesPerEU != 0); 796 797 if (WavesPerEU >= getMaxWavesPerEU(STI)) 798 return 0; 799 unsigned MinNumVGPRs = 800 alignDown(getTotalNumVGPRs(STI) / (WavesPerEU + 1), 801 getVGPRAllocGranule(STI)) + 1; 802 return std::min(MinNumVGPRs, getAddressableNumVGPRs(STI)); 803 } 804 805 unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) { 806 assert(WavesPerEU != 0); 807 808 unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU, 809 getVGPRAllocGranule(STI)); 810 unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI); 811 return std::min(MaxNumVGPRs, AddressableNumVGPRs); 812 } 813 814 unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, 815 Optional<bool> EnableWavefrontSize32) { 816 NumVGPRs = alignTo(std::max(1u, NumVGPRs), 817 getVGPREncodingGranule(STI, EnableWavefrontSize32)); 818 // VGPRBlocks is actual number of VGPR blocks minus 1. 819 return NumVGPRs / getVGPREncodingGranule(STI, EnableWavefrontSize32) - 1; 820 } 821 822 } // end namespace IsaInfo 823 824 void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header, 825 const MCSubtargetInfo *STI) { 826 IsaVersion Version = getIsaVersion(STI->getCPU()); 827 828 memset(&Header, 0, sizeof(Header)); 829 830 Header.amd_kernel_code_version_major = 1; 831 Header.amd_kernel_code_version_minor = 2; 832 Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU 833 Header.amd_machine_version_major = Version.Major; 834 Header.amd_machine_version_minor = Version.Minor; 835 Header.amd_machine_version_stepping = Version.Stepping; 836 Header.kernel_code_entry_byte_offset = sizeof(Header); 837 Header.wavefront_size = 6; 838 839 // If the code object does not support indirect functions, then the value must 840 // be 0xffffffff. 841 Header.call_convention = -1; 842 843 // These alignment values are specified in powers of two, so alignment = 844 // 2^n. The minimum alignment is 2^4 = 16. 845 Header.kernarg_segment_alignment = 4; 846 Header.group_segment_alignment = 4; 847 Header.private_segment_alignment = 4; 848 849 if (Version.Major >= 10) { 850 if (STI->getFeatureBits().test(FeatureWavefrontSize32)) { 851 Header.wavefront_size = 5; 852 Header.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32; 853 } 854 Header.compute_pgm_resource_registers |= 855 S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) | 856 S_00B848_MEM_ORDERED(1); 857 } 858 } 859 860 amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor( 861 const MCSubtargetInfo *STI) { 862 IsaVersion Version = getIsaVersion(STI->getCPU()); 863 864 amdhsa::kernel_descriptor_t KD; 865 memset(&KD, 0, sizeof(KD)); 866 867 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1, 868 amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64, 869 amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE); 870 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1, 871 amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1); 872 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1, 873 amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1); 874 AMDHSA_BITS_SET(KD.compute_pgm_rsrc2, 875 amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1); 876 if (Version.Major >= 10) { 877 AMDHSA_BITS_SET(KD.kernel_code_properties, 878 amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32, 879 STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1 : 0); 880 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1, 881 amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE, 882 STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1); 883 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1, 884 amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED, 1); 885 } 886 if (AMDGPU::isGFX90A(*STI)) { 887 AMDHSA_BITS_SET(KD.compute_pgm_rsrc3, 888 amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT, 889 STI->getFeatureBits().test(FeatureTgSplit) ? 1 : 0); 890 } 891 return KD; 892 } 893 894 bool isGroupSegment(const GlobalValue *GV) { 895 return GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS; 896 } 897 898 bool isGlobalSegment(const GlobalValue *GV) { 899 return GV->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS; 900 } 901 902 bool isReadOnlySegment(const GlobalValue *GV) { 903 unsigned AS = GV->getAddressSpace(); 904 return AS == AMDGPUAS::CONSTANT_ADDRESS || 905 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT; 906 } 907 908 bool shouldEmitConstantsToTextSection(const Triple &TT) { 909 return TT.getArch() == Triple::r600; 910 } 911 912 int getIntegerAttribute(const Function &F, StringRef Name, int Default) { 913 Attribute A = F.getFnAttribute(Name); 914 int Result = Default; 915 916 if (A.isStringAttribute()) { 917 StringRef Str = A.getValueAsString(); 918 if (Str.getAsInteger(0, Result)) { 919 LLVMContext &Ctx = F.getContext(); 920 Ctx.emitError("can't parse integer attribute " + Name); 921 } 922 } 923 924 return Result; 925 } 926 927 std::pair<int, int> getIntegerPairAttribute(const Function &F, 928 StringRef Name, 929 std::pair<int, int> Default, 930 bool OnlyFirstRequired) { 931 Attribute A = F.getFnAttribute(Name); 932 if (!A.isStringAttribute()) 933 return Default; 934 935 LLVMContext &Ctx = F.getContext(); 936 std::pair<int, int> Ints = Default; 937 std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(','); 938 if (Strs.first.trim().getAsInteger(0, Ints.first)) { 939 Ctx.emitError("can't parse first integer attribute " + Name); 940 return Default; 941 } 942 if (Strs.second.trim().getAsInteger(0, Ints.second)) { 943 if (!OnlyFirstRequired || !Strs.second.trim().empty()) { 944 Ctx.emitError("can't parse second integer attribute " + Name); 945 return Default; 946 } 947 } 948 949 return Ints; 950 } 951 952 unsigned getVmcntBitMask(const IsaVersion &Version) { 953 unsigned VmcntLo = (1 << getVmcntBitWidthLo()) - 1; 954 if (Version.Major < 9) 955 return VmcntLo; 956 957 unsigned VmcntHi = ((1 << getVmcntBitWidthHi()) - 1) << getVmcntBitWidthLo(); 958 return VmcntLo | VmcntHi; 959 } 960 961 unsigned getExpcntBitMask(const IsaVersion &Version) { 962 return (1 << getExpcntBitWidth()) - 1; 963 } 964 965 unsigned getLgkmcntBitMask(const IsaVersion &Version) { 966 return (1 << getLgkmcntBitWidth(Version.Major)) - 1; 967 } 968 969 unsigned getWaitcntBitMask(const IsaVersion &Version) { 970 unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(), getVmcntBitWidthLo()); 971 unsigned Expcnt = getBitMask(getExpcntBitShift(), getExpcntBitWidth()); 972 unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(), 973 getLgkmcntBitWidth(Version.Major)); 974 unsigned Waitcnt = VmcntLo | Expcnt | Lgkmcnt; 975 if (Version.Major < 9) 976 return Waitcnt; 977 978 unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(), getVmcntBitWidthHi()); 979 return Waitcnt | VmcntHi; 980 } 981 982 unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) { 983 unsigned VmcntLo = 984 unpackBits(Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo()); 985 if (Version.Major < 9) 986 return VmcntLo; 987 988 unsigned VmcntHi = 989 unpackBits(Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi()); 990 VmcntHi <<= getVmcntBitWidthLo(); 991 return VmcntLo | VmcntHi; 992 } 993 994 unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) { 995 return unpackBits(Waitcnt, getExpcntBitShift(), getExpcntBitWidth()); 996 } 997 998 unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) { 999 return unpackBits(Waitcnt, getLgkmcntBitShift(), 1000 getLgkmcntBitWidth(Version.Major)); 1001 } 1002 1003 void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, 1004 unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) { 1005 Vmcnt = decodeVmcnt(Version, Waitcnt); 1006 Expcnt = decodeExpcnt(Version, Waitcnt); 1007 Lgkmcnt = decodeLgkmcnt(Version, Waitcnt); 1008 } 1009 1010 Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) { 1011 Waitcnt Decoded; 1012 Decoded.VmCnt = decodeVmcnt(Version, Encoded); 1013 Decoded.ExpCnt = decodeExpcnt(Version, Encoded); 1014 Decoded.LgkmCnt = decodeLgkmcnt(Version, Encoded); 1015 return Decoded; 1016 } 1017 1018 unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt, 1019 unsigned Vmcnt) { 1020 Waitcnt = 1021 packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo()); 1022 if (Version.Major < 9) 1023 return Waitcnt; 1024 1025 Vmcnt >>= getVmcntBitWidthLo(); 1026 return packBits(Vmcnt, Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi()); 1027 } 1028 1029 unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt, 1030 unsigned Expcnt) { 1031 return packBits(Expcnt, Waitcnt, getExpcntBitShift(), getExpcntBitWidth()); 1032 } 1033 1034 unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt, 1035 unsigned Lgkmcnt) { 1036 return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(), 1037 getLgkmcntBitWidth(Version.Major)); 1038 } 1039 1040 unsigned encodeWaitcnt(const IsaVersion &Version, 1041 unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) { 1042 unsigned Waitcnt = getWaitcntBitMask(Version); 1043 Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt); 1044 Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt); 1045 Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt); 1046 return Waitcnt; 1047 } 1048 1049 unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) { 1050 return encodeWaitcnt(Version, Decoded.VmCnt, Decoded.ExpCnt, Decoded.LgkmCnt); 1051 } 1052 1053 //===----------------------------------------------------------------------===// 1054 // Custom Operands. 1055 // 1056 // A table of custom operands shall describe "primary" operand names 1057 // first followed by aliases if any. It is not required but recommended 1058 // to arrange operands so that operand encoding match operand position 1059 // in the table. This will make disassembly a bit more efficient. 1060 // Unused slots in the table shall have an empty name. 1061 // 1062 //===----------------------------------------------------------------------===// 1063 1064 template <class T> 1065 static bool isValidOpr(int Idx, const CustomOperand<T> OpInfo[], int OpInfoSize, 1066 T Context) { 1067 return 0 <= Idx && Idx < OpInfoSize && !OpInfo[Idx].Name.empty() && 1068 (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context)); 1069 } 1070 1071 template <class T> 1072 static int getOprIdx(std::function<bool(const CustomOperand<T> &)> Test, 1073 const CustomOperand<T> OpInfo[], int OpInfoSize, 1074 T Context) { 1075 int InvalidIdx = OPR_ID_UNKNOWN; 1076 for (int Idx = 0; Idx < OpInfoSize; ++Idx) { 1077 if (Test(OpInfo[Idx])) { 1078 if (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context)) 1079 return Idx; 1080 InvalidIdx = OPR_ID_UNSUPPORTED; 1081 } 1082 } 1083 return InvalidIdx; 1084 } 1085 1086 template <class T> 1087 static int getOprIdx(const StringRef Name, const CustomOperand<T> OpInfo[], 1088 int OpInfoSize, T Context) { 1089 auto Test = [=](const CustomOperand<T> &Op) { return Op.Name == Name; }; 1090 return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context); 1091 } 1092 1093 template <class T> 1094 static int getOprIdx(int Id, const CustomOperand<T> OpInfo[], int OpInfoSize, 1095 T Context, bool QuickCheck = true) { 1096 auto Test = [=](const CustomOperand<T> &Op) { 1097 return Op.Encoding == Id && !Op.Name.empty(); 1098 }; 1099 // This is an optimization that should work in most cases. 1100 // As a side effect, it may cause selection of an alias 1101 // instead of a primary operand name in case of sparse tables. 1102 if (QuickCheck && isValidOpr<T>(Id, OpInfo, OpInfoSize, Context) && 1103 OpInfo[Id].Encoding == Id) { 1104 return Id; 1105 } 1106 return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context); 1107 } 1108 1109 //===----------------------------------------------------------------------===// 1110 // Custom Operand Values 1111 //===----------------------------------------------------------------------===// 1112 1113 static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr, 1114 int Size, 1115 const MCSubtargetInfo &STI) { 1116 unsigned Enc = 0; 1117 for (int Idx = 0; Idx < Size; ++Idx) { 1118 const auto &Op = Opr[Idx]; 1119 if (Op.isSupported(STI)) 1120 Enc |= Op.encode(Op.Default); 1121 } 1122 return Enc; 1123 } 1124 1125 static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr, 1126 int Size, unsigned Code, 1127 bool &HasNonDefaultVal, 1128 const MCSubtargetInfo &STI) { 1129 unsigned UsedOprMask = 0; 1130 HasNonDefaultVal = false; 1131 for (int Idx = 0; Idx < Size; ++Idx) { 1132 const auto &Op = Opr[Idx]; 1133 if (!Op.isSupported(STI)) 1134 continue; 1135 UsedOprMask |= Op.getMask(); 1136 unsigned Val = Op.decode(Code); 1137 if (!Op.isValid(Val)) 1138 return false; 1139 HasNonDefaultVal |= (Val != Op.Default); 1140 } 1141 return (Code & ~UsedOprMask) == 0; 1142 } 1143 1144 static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size, 1145 unsigned Code, int &Idx, StringRef &Name, 1146 unsigned &Val, bool &IsDefault, 1147 const MCSubtargetInfo &STI) { 1148 while (Idx < Size) { 1149 const auto &Op = Opr[Idx++]; 1150 if (Op.isSupported(STI)) { 1151 Name = Op.Name; 1152 Val = Op.decode(Code); 1153 IsDefault = (Val == Op.Default); 1154 return true; 1155 } 1156 } 1157 1158 return false; 1159 } 1160 1161 static int encodeCustomOperandVal(const CustomOperandVal &Op, 1162 int64_t InputVal) { 1163 if (InputVal < 0 || InputVal > Op.Max) 1164 return OPR_VAL_INVALID; 1165 return Op.encode(InputVal); 1166 } 1167 1168 static int encodeCustomOperand(const CustomOperandVal *Opr, int Size, 1169 const StringRef Name, int64_t InputVal, 1170 unsigned &UsedOprMask, 1171 const MCSubtargetInfo &STI) { 1172 int InvalidId = OPR_ID_UNKNOWN; 1173 for (int Idx = 0; Idx < Size; ++Idx) { 1174 const auto &Op = Opr[Idx]; 1175 if (Op.Name == Name) { 1176 if (!Op.isSupported(STI)) { 1177 InvalidId = OPR_ID_UNSUPPORTED; 1178 continue; 1179 } 1180 auto OprMask = Op.getMask(); 1181 if (OprMask & UsedOprMask) 1182 return OPR_ID_DUPLICATE; 1183 UsedOprMask |= OprMask; 1184 return encodeCustomOperandVal(Op, InputVal); 1185 } 1186 } 1187 return InvalidId; 1188 } 1189 1190 //===----------------------------------------------------------------------===// 1191 // DepCtr 1192 //===----------------------------------------------------------------------===// 1193 1194 namespace DepCtr { 1195 1196 int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI) { 1197 static int Default = -1; 1198 if (Default == -1) 1199 Default = getDefaultCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, STI); 1200 return Default; 1201 } 1202 1203 bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal, 1204 const MCSubtargetInfo &STI) { 1205 return isSymbolicCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, Code, 1206 HasNonDefaultVal, STI); 1207 } 1208 1209 bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val, 1210 bool &IsDefault, const MCSubtargetInfo &STI) { 1211 return decodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Code, Id, Name, Val, 1212 IsDefault, STI); 1213 } 1214 1215 int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask, 1216 const MCSubtargetInfo &STI) { 1217 return encodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Name, Val, UsedOprMask, 1218 STI); 1219 } 1220 1221 } // namespace DepCtr 1222 1223 //===----------------------------------------------------------------------===// 1224 // hwreg 1225 //===----------------------------------------------------------------------===// 1226 1227 namespace Hwreg { 1228 1229 int64_t getHwregId(const StringRef Name, const MCSubtargetInfo &STI) { 1230 int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Opr, OPR_SIZE, STI); 1231 return (Idx < 0) ? Idx : Opr[Idx].Encoding; 1232 } 1233 1234 bool isValidHwreg(int64_t Id) { 1235 return 0 <= Id && isUInt<ID_WIDTH_>(Id); 1236 } 1237 1238 bool isValidHwregOffset(int64_t Offset) { 1239 return 0 <= Offset && isUInt<OFFSET_WIDTH_>(Offset); 1240 } 1241 1242 bool isValidHwregWidth(int64_t Width) { 1243 return 0 <= (Width - 1) && isUInt<WIDTH_M1_WIDTH_>(Width - 1); 1244 } 1245 1246 uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width) { 1247 return (Id << ID_SHIFT_) | 1248 (Offset << OFFSET_SHIFT_) | 1249 ((Width - 1) << WIDTH_M1_SHIFT_); 1250 } 1251 1252 StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) { 1253 int Idx = getOprIdx<const MCSubtargetInfo &>(Id, Opr, OPR_SIZE, STI); 1254 return (Idx < 0) ? "" : Opr[Idx].Name; 1255 } 1256 1257 void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width) { 1258 Id = (Val & ID_MASK_) >> ID_SHIFT_; 1259 Offset = (Val & OFFSET_MASK_) >> OFFSET_SHIFT_; 1260 Width = ((Val & WIDTH_M1_MASK_) >> WIDTH_M1_SHIFT_) + 1; 1261 } 1262 1263 } // namespace Hwreg 1264 1265 //===----------------------------------------------------------------------===// 1266 // exp tgt 1267 //===----------------------------------------------------------------------===// 1268 1269 namespace Exp { 1270 1271 struct ExpTgt { 1272 StringLiteral Name; 1273 unsigned Tgt; 1274 unsigned MaxIndex; 1275 }; 1276 1277 static constexpr ExpTgt ExpTgtInfo[] = { 1278 {{"null"}, ET_NULL, ET_NULL_MAX_IDX}, 1279 {{"mrtz"}, ET_MRTZ, ET_MRTZ_MAX_IDX}, 1280 {{"prim"}, ET_PRIM, ET_PRIM_MAX_IDX}, 1281 {{"mrt"}, ET_MRT0, ET_MRT_MAX_IDX}, 1282 {{"pos"}, ET_POS0, ET_POS_MAX_IDX}, 1283 {{"param"}, ET_PARAM0, ET_PARAM_MAX_IDX}, 1284 }; 1285 1286 bool getTgtName(unsigned Id, StringRef &Name, int &Index) { 1287 for (const ExpTgt &Val : ExpTgtInfo) { 1288 if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) { 1289 Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt); 1290 Name = Val.Name; 1291 return true; 1292 } 1293 } 1294 return false; 1295 } 1296 1297 unsigned getTgtId(const StringRef Name) { 1298 1299 for (const ExpTgt &Val : ExpTgtInfo) { 1300 if (Val.MaxIndex == 0 && Name == Val.Name) 1301 return Val.Tgt; 1302 1303 if (Val.MaxIndex > 0 && Name.startswith(Val.Name)) { 1304 StringRef Suffix = Name.drop_front(Val.Name.size()); 1305 1306 unsigned Id; 1307 if (Suffix.getAsInteger(10, Id) || Id > Val.MaxIndex) 1308 return ET_INVALID; 1309 1310 // Disable leading zeroes 1311 if (Suffix.size() > 1 && Suffix[0] == '0') 1312 return ET_INVALID; 1313 1314 return Val.Tgt + Id; 1315 } 1316 } 1317 return ET_INVALID; 1318 } 1319 1320 bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI) { 1321 return (Id != ET_POS4 && Id != ET_PRIM) || isGFX10Plus(STI); 1322 } 1323 1324 } // namespace Exp 1325 1326 //===----------------------------------------------------------------------===// 1327 // MTBUF Format 1328 //===----------------------------------------------------------------------===// 1329 1330 namespace MTBUFFormat { 1331 1332 int64_t getDfmt(const StringRef Name) { 1333 for (int Id = DFMT_MIN; Id <= DFMT_MAX; ++Id) { 1334 if (Name == DfmtSymbolic[Id]) 1335 return Id; 1336 } 1337 return DFMT_UNDEF; 1338 } 1339 1340 StringRef getDfmtName(unsigned Id) { 1341 assert(Id <= DFMT_MAX); 1342 return DfmtSymbolic[Id]; 1343 } 1344 1345 static StringLiteral const *getNfmtLookupTable(const MCSubtargetInfo &STI) { 1346 if (isSI(STI) || isCI(STI)) 1347 return NfmtSymbolicSICI; 1348 if (isVI(STI) || isGFX9(STI)) 1349 return NfmtSymbolicVI; 1350 return NfmtSymbolicGFX10; 1351 } 1352 1353 int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI) { 1354 auto lookupTable = getNfmtLookupTable(STI); 1355 for (int Id = NFMT_MIN; Id <= NFMT_MAX; ++Id) { 1356 if (Name == lookupTable[Id]) 1357 return Id; 1358 } 1359 return NFMT_UNDEF; 1360 } 1361 1362 StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI) { 1363 assert(Id <= NFMT_MAX); 1364 return getNfmtLookupTable(STI)[Id]; 1365 } 1366 1367 bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI) { 1368 unsigned Dfmt; 1369 unsigned Nfmt; 1370 decodeDfmtNfmt(Id, Dfmt, Nfmt); 1371 return isValidNfmt(Nfmt, STI); 1372 } 1373 1374 bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI) { 1375 return !getNfmtName(Id, STI).empty(); 1376 } 1377 1378 int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt) { 1379 return (Dfmt << DFMT_SHIFT) | (Nfmt << NFMT_SHIFT); 1380 } 1381 1382 void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt) { 1383 Dfmt = (Format >> DFMT_SHIFT) & DFMT_MASK; 1384 Nfmt = (Format >> NFMT_SHIFT) & NFMT_MASK; 1385 } 1386 1387 int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI) { 1388 if (isGFX11Plus(STI)) { 1389 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) { 1390 if (Name == UfmtSymbolicGFX11[Id]) 1391 return Id; 1392 } 1393 } else { 1394 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) { 1395 if (Name == UfmtSymbolicGFX10[Id]) 1396 return Id; 1397 } 1398 } 1399 return UFMT_UNDEF; 1400 } 1401 1402 StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI) { 1403 if(isValidUnifiedFormat(Id, STI)) 1404 return isGFX10(STI) ? UfmtSymbolicGFX10[Id] : UfmtSymbolicGFX11[Id]; 1405 return ""; 1406 } 1407 1408 bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI) { 1409 return isGFX10(STI) ? Id <= UfmtGFX10::UFMT_LAST : Id <= UfmtGFX11::UFMT_LAST; 1410 } 1411 1412 int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt, 1413 const MCSubtargetInfo &STI) { 1414 int64_t Fmt = encodeDfmtNfmt(Dfmt, Nfmt); 1415 if (isGFX11Plus(STI)) { 1416 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) { 1417 if (Fmt == DfmtNfmt2UFmtGFX11[Id]) 1418 return Id; 1419 } 1420 } else { 1421 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) { 1422 if (Fmt == DfmtNfmt2UFmtGFX10[Id]) 1423 return Id; 1424 } 1425 } 1426 return UFMT_UNDEF; 1427 } 1428 1429 bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI) { 1430 return isGFX10Plus(STI) ? (Val <= UFMT_MAX) : (Val <= DFMT_NFMT_MAX); 1431 } 1432 1433 unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI) { 1434 if (isGFX10Plus(STI)) 1435 return UFMT_DEFAULT; 1436 return DFMT_NFMT_DEFAULT; 1437 } 1438 1439 } // namespace MTBUFFormat 1440 1441 //===----------------------------------------------------------------------===// 1442 // SendMsg 1443 //===----------------------------------------------------------------------===// 1444 1445 namespace SendMsg { 1446 1447 int64_t getMsgId(const StringRef Name, const MCSubtargetInfo &STI) { 1448 int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Msg, MSG_SIZE, STI); 1449 return (Idx < 0) ? Idx : Msg[Idx].Encoding; 1450 } 1451 1452 bool isValidMsgId(int64_t MsgId) { 1453 return 0 <= MsgId && isUInt<ID_WIDTH_>(MsgId); 1454 } 1455 1456 StringRef getMsgName(int64_t MsgId, const MCSubtargetInfo &STI) { 1457 int Idx = getOprIdx<const MCSubtargetInfo &>(MsgId, Msg, MSG_SIZE, STI); 1458 return (Idx < 0) ? "" : Msg[Idx].Name; 1459 } 1460 1461 int64_t getMsgOpId(int64_t MsgId, const StringRef Name) { 1462 const char* const *S = (MsgId == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic; 1463 const int F = (MsgId == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_; 1464 const int L = (MsgId == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_; 1465 for (int i = F; i < L; ++i) { 1466 if (Name == S[i]) { 1467 return i; 1468 } 1469 } 1470 return OP_UNKNOWN_; 1471 } 1472 1473 bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI, 1474 bool Strict) { 1475 assert(isValidMsgId(MsgId)); 1476 1477 if (!Strict) 1478 return 0 <= OpId && isUInt<OP_WIDTH_>(OpId); 1479 1480 switch(MsgId) 1481 { 1482 case ID_GS: 1483 return (OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_) && OpId != OP_GS_NOP; 1484 case ID_GS_DONE: 1485 return OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_; 1486 case ID_SYSMSG: 1487 return OP_SYS_FIRST_ <= OpId && OpId < OP_SYS_LAST_; 1488 default: 1489 return OpId == OP_NONE_; 1490 } 1491 } 1492 1493 StringRef getMsgOpName(int64_t MsgId, int64_t OpId) { 1494 assert(msgRequiresOp(MsgId)); 1495 return (MsgId == ID_SYSMSG)? OpSysSymbolic[OpId] : OpGsSymbolic[OpId]; 1496 } 1497 1498 bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, 1499 const MCSubtargetInfo &STI, bool Strict) { 1500 assert(isValidMsgOp(MsgId, OpId, STI, Strict)); 1501 1502 if (!Strict) 1503 return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId); 1504 1505 switch(MsgId) 1506 { 1507 case ID_GS: 1508 return STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_; 1509 case ID_GS_DONE: 1510 return (OpId == OP_GS_NOP)? 1511 (StreamId == STREAM_ID_NONE_) : 1512 (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_); 1513 default: 1514 return StreamId == STREAM_ID_NONE_; 1515 } 1516 } 1517 1518 bool msgRequiresOp(int64_t MsgId) { 1519 return MsgId == ID_GS || MsgId == ID_GS_DONE || MsgId == ID_SYSMSG; 1520 } 1521 1522 bool msgSupportsStream(int64_t MsgId, int64_t OpId) { 1523 return (MsgId == ID_GS || MsgId == ID_GS_DONE) && OpId != OP_GS_NOP; 1524 } 1525 1526 void decodeMsg(unsigned Val, 1527 uint16_t &MsgId, 1528 uint16_t &OpId, 1529 uint16_t &StreamId) { 1530 MsgId = Val & ID_MASK_; 1531 OpId = (Val & OP_MASK_) >> OP_SHIFT_; 1532 StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_; 1533 } 1534 1535 uint64_t encodeMsg(uint64_t MsgId, 1536 uint64_t OpId, 1537 uint64_t StreamId) { 1538 return (MsgId << ID_SHIFT_) | 1539 (OpId << OP_SHIFT_) | 1540 (StreamId << STREAM_ID_SHIFT_); 1541 } 1542 1543 } // namespace SendMsg 1544 1545 //===----------------------------------------------------------------------===// 1546 // 1547 //===----------------------------------------------------------------------===// 1548 1549 unsigned getInitialPSInputAddr(const Function &F) { 1550 return getIntegerAttribute(F, "InitialPSInputAddr", 0); 1551 } 1552 1553 bool getHasColorExport(const Function &F) { 1554 // As a safe default always respond as if PS has color exports. 1555 return getIntegerAttribute( 1556 F, "amdgpu-color-export", 1557 F.getCallingConv() == CallingConv::AMDGPU_PS ? 1 : 0) != 0; 1558 } 1559 1560 bool getHasDepthExport(const Function &F) { 1561 return getIntegerAttribute(F, "amdgpu-depth-export", 0) != 0; 1562 } 1563 1564 bool isShader(CallingConv::ID cc) { 1565 switch(cc) { 1566 case CallingConv::AMDGPU_VS: 1567 case CallingConv::AMDGPU_LS: 1568 case CallingConv::AMDGPU_HS: 1569 case CallingConv::AMDGPU_ES: 1570 case CallingConv::AMDGPU_GS: 1571 case CallingConv::AMDGPU_PS: 1572 case CallingConv::AMDGPU_CS: 1573 return true; 1574 default: 1575 return false; 1576 } 1577 } 1578 1579 bool isGraphics(CallingConv::ID cc) { 1580 return isShader(cc) || cc == CallingConv::AMDGPU_Gfx; 1581 } 1582 1583 bool isCompute(CallingConv::ID cc) { 1584 return !isGraphics(cc) || cc == CallingConv::AMDGPU_CS; 1585 } 1586 1587 bool isEntryFunctionCC(CallingConv::ID CC) { 1588 switch (CC) { 1589 case CallingConv::AMDGPU_KERNEL: 1590 case CallingConv::SPIR_KERNEL: 1591 case CallingConv::AMDGPU_VS: 1592 case CallingConv::AMDGPU_GS: 1593 case CallingConv::AMDGPU_PS: 1594 case CallingConv::AMDGPU_CS: 1595 case CallingConv::AMDGPU_ES: 1596 case CallingConv::AMDGPU_HS: 1597 case CallingConv::AMDGPU_LS: 1598 return true; 1599 default: 1600 return false; 1601 } 1602 } 1603 1604 bool isModuleEntryFunctionCC(CallingConv::ID CC) { 1605 switch (CC) { 1606 case CallingConv::AMDGPU_Gfx: 1607 return true; 1608 default: 1609 return isEntryFunctionCC(CC); 1610 } 1611 } 1612 1613 bool isKernelCC(const Function *Func) { 1614 return AMDGPU::isModuleEntryFunctionCC(Func->getCallingConv()); 1615 } 1616 1617 bool hasXNACK(const MCSubtargetInfo &STI) { 1618 return STI.getFeatureBits()[AMDGPU::FeatureXNACK]; 1619 } 1620 1621 bool hasSRAMECC(const MCSubtargetInfo &STI) { 1622 return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC]; 1623 } 1624 1625 bool hasMIMG_R128(const MCSubtargetInfo &STI) { 1626 return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128] && !STI.getFeatureBits()[AMDGPU::FeatureR128A16]; 1627 } 1628 1629 bool hasGFX10A16(const MCSubtargetInfo &STI) { 1630 return STI.getFeatureBits()[AMDGPU::FeatureGFX10A16]; 1631 } 1632 1633 bool hasG16(const MCSubtargetInfo &STI) { 1634 return STI.getFeatureBits()[AMDGPU::FeatureG16]; 1635 } 1636 1637 bool hasPackedD16(const MCSubtargetInfo &STI) { 1638 return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem] && !isCI(STI) && 1639 !isSI(STI); 1640 } 1641 1642 bool isSI(const MCSubtargetInfo &STI) { 1643 return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands]; 1644 } 1645 1646 bool isCI(const MCSubtargetInfo &STI) { 1647 return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands]; 1648 } 1649 1650 bool isVI(const MCSubtargetInfo &STI) { 1651 return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]; 1652 } 1653 1654 bool isGFX9(const MCSubtargetInfo &STI) { 1655 return STI.getFeatureBits()[AMDGPU::FeatureGFX9]; 1656 } 1657 1658 bool isGFX9_GFX10(const MCSubtargetInfo &STI) { 1659 return isGFX9(STI) || isGFX10(STI); 1660 } 1661 1662 bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI) { 1663 return isVI(STI) || isGFX9(STI) || isGFX10(STI); 1664 } 1665 1666 bool isGFX8Plus(const MCSubtargetInfo &STI) { 1667 return isVI(STI) || isGFX9Plus(STI); 1668 } 1669 1670 bool isGFX9Plus(const MCSubtargetInfo &STI) { 1671 return isGFX9(STI) || isGFX10Plus(STI); 1672 } 1673 1674 bool isGFX10(const MCSubtargetInfo &STI) { 1675 return STI.getFeatureBits()[AMDGPU::FeatureGFX10]; 1676 } 1677 1678 bool isGFX10Plus(const MCSubtargetInfo &STI) { 1679 return isGFX10(STI) || isGFX11Plus(STI); 1680 } 1681 1682 bool isGFX11(const MCSubtargetInfo &STI) { 1683 return STI.getFeatureBits()[AMDGPU::FeatureGFX11]; 1684 } 1685 1686 bool isGFX11Plus(const MCSubtargetInfo &STI) { 1687 return isGFX11(STI); 1688 } 1689 1690 bool isNotGFX11Plus(const MCSubtargetInfo &STI) { 1691 return !isGFX11Plus(STI); 1692 } 1693 1694 bool isNotGFX10Plus(const MCSubtargetInfo &STI) { 1695 return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI); 1696 } 1697 1698 bool isGFX10Before1030(const MCSubtargetInfo &STI) { 1699 return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI); 1700 } 1701 1702 bool isGCN3Encoding(const MCSubtargetInfo &STI) { 1703 return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding]; 1704 } 1705 1706 bool isGFX10_AEncoding(const MCSubtargetInfo &STI) { 1707 return STI.getFeatureBits()[AMDGPU::FeatureGFX10_AEncoding]; 1708 } 1709 1710 bool isGFX10_BEncoding(const MCSubtargetInfo &STI) { 1711 return STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding]; 1712 } 1713 1714 bool hasGFX10_3Insts(const MCSubtargetInfo &STI) { 1715 return STI.getFeatureBits()[AMDGPU::FeatureGFX10_3Insts]; 1716 } 1717 1718 bool isGFX90A(const MCSubtargetInfo &STI) { 1719 return STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts]; 1720 } 1721 1722 bool isGFX940(const MCSubtargetInfo &STI) { 1723 return STI.getFeatureBits()[AMDGPU::FeatureGFX940Insts]; 1724 } 1725 1726 bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) { 1727 return STI.getFeatureBits()[AMDGPU::FeatureArchitectedFlatScratch]; 1728 } 1729 1730 bool hasMAIInsts(const MCSubtargetInfo &STI) { 1731 return STI.getFeatureBits()[AMDGPU::FeatureMAIInsts]; 1732 } 1733 1734 int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR, 1735 int32_t ArgNumVGPR) { 1736 if (has90AInsts && ArgNumAGPR) 1737 return alignTo(ArgNumVGPR, 4) + ArgNumAGPR; 1738 return std::max(ArgNumVGPR, ArgNumAGPR); 1739 } 1740 1741 bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) { 1742 const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID); 1743 const unsigned FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0); 1744 return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) || 1745 Reg == AMDGPU::SCC; 1746 } 1747 1748 #define MAP_REG2REG \ 1749 using namespace AMDGPU; \ 1750 switch(Reg) { \ 1751 default: return Reg; \ 1752 CASE_CI_VI(FLAT_SCR) \ 1753 CASE_CI_VI(FLAT_SCR_LO) \ 1754 CASE_CI_VI(FLAT_SCR_HI) \ 1755 CASE_VI_GFX9PLUS(TTMP0) \ 1756 CASE_VI_GFX9PLUS(TTMP1) \ 1757 CASE_VI_GFX9PLUS(TTMP2) \ 1758 CASE_VI_GFX9PLUS(TTMP3) \ 1759 CASE_VI_GFX9PLUS(TTMP4) \ 1760 CASE_VI_GFX9PLUS(TTMP5) \ 1761 CASE_VI_GFX9PLUS(TTMP6) \ 1762 CASE_VI_GFX9PLUS(TTMP7) \ 1763 CASE_VI_GFX9PLUS(TTMP8) \ 1764 CASE_VI_GFX9PLUS(TTMP9) \ 1765 CASE_VI_GFX9PLUS(TTMP10) \ 1766 CASE_VI_GFX9PLUS(TTMP11) \ 1767 CASE_VI_GFX9PLUS(TTMP12) \ 1768 CASE_VI_GFX9PLUS(TTMP13) \ 1769 CASE_VI_GFX9PLUS(TTMP14) \ 1770 CASE_VI_GFX9PLUS(TTMP15) \ 1771 CASE_VI_GFX9PLUS(TTMP0_TTMP1) \ 1772 CASE_VI_GFX9PLUS(TTMP2_TTMP3) \ 1773 CASE_VI_GFX9PLUS(TTMP4_TTMP5) \ 1774 CASE_VI_GFX9PLUS(TTMP6_TTMP7) \ 1775 CASE_VI_GFX9PLUS(TTMP8_TTMP9) \ 1776 CASE_VI_GFX9PLUS(TTMP10_TTMP11) \ 1777 CASE_VI_GFX9PLUS(TTMP12_TTMP13) \ 1778 CASE_VI_GFX9PLUS(TTMP14_TTMP15) \ 1779 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \ 1780 CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \ 1781 CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \ 1782 CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \ 1783 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \ 1784 CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \ 1785 CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \ 1786 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \ 1787 CASE_GFXPRE11_GFX11PLUS(M0) \ 1788 CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \ 1789 } 1790 1791 #define CASE_CI_VI(node) \ 1792 assert(!isSI(STI)); \ 1793 case node: return isCI(STI) ? node##_ci : node##_vi; 1794 1795 #define CASE_VI_GFX9PLUS(node) \ 1796 case node: return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi; 1797 1798 #define CASE_GFXPRE11_GFX11PLUS(node) \ 1799 case node: return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11; 1800 1801 unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) { 1802 if (STI.getTargetTriple().getArch() == Triple::r600) 1803 return Reg; 1804 MAP_REG2REG 1805 } 1806 1807 #undef CASE_CI_VI 1808 #undef CASE_VI_GFX9PLUS 1809 #undef CASE_GFXPRE11_GFX11PLUS 1810 1811 #define CASE_CI_VI(node) case node##_ci: case node##_vi: return node; 1812 #define CASE_VI_GFX9PLUS(node) case node##_vi: case node##_gfx9plus: return node; 1813 #define CASE_GFXPRE11_GFX11PLUS(node) case node##_gfx11plus: case node##_gfxpre11: return node; 1814 1815 unsigned mc2PseudoReg(unsigned Reg) { 1816 MAP_REG2REG 1817 } 1818 1819 #undef CASE_CI_VI 1820 #undef CASE_VI_GFX9PLUS 1821 #undef CASE_GFXPRE11_GFX11PLUS 1822 #undef MAP_REG2REG 1823 1824 bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) { 1825 assert(OpNo < Desc.NumOperands); 1826 unsigned OpType = Desc.OpInfo[OpNo].OperandType; 1827 return OpType >= AMDGPU::OPERAND_SRC_FIRST && 1828 OpType <= AMDGPU::OPERAND_SRC_LAST; 1829 } 1830 1831 bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) { 1832 assert(OpNo < Desc.NumOperands); 1833 unsigned OpType = Desc.OpInfo[OpNo].OperandType; 1834 switch (OpType) { 1835 case AMDGPU::OPERAND_REG_IMM_FP32: 1836 case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED: 1837 case AMDGPU::OPERAND_REG_IMM_FP64: 1838 case AMDGPU::OPERAND_REG_IMM_FP16: 1839 case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED: 1840 case AMDGPU::OPERAND_REG_IMM_V2FP16: 1841 case AMDGPU::OPERAND_REG_IMM_V2INT16: 1842 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 1843 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 1844 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 1845 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: 1846 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: 1847 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: 1848 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: 1849 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: 1850 case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16: 1851 case AMDGPU::OPERAND_REG_IMM_V2FP32: 1852 case AMDGPU::OPERAND_REG_INLINE_C_V2FP32: 1853 case AMDGPU::OPERAND_REG_INLINE_AC_FP64: 1854 return true; 1855 default: 1856 return false; 1857 } 1858 } 1859 1860 bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) { 1861 assert(OpNo < Desc.NumOperands); 1862 unsigned OpType = Desc.OpInfo[OpNo].OperandType; 1863 return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST && 1864 OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST; 1865 } 1866 1867 // Avoid using MCRegisterClass::getSize, since that function will go away 1868 // (move from MC* level to Target* level). Return size in bits. 1869 unsigned getRegBitWidth(unsigned RCID) { 1870 switch (RCID) { 1871 case AMDGPU::VGPR_LO16RegClassID: 1872 case AMDGPU::VGPR_HI16RegClassID: 1873 case AMDGPU::SGPR_LO16RegClassID: 1874 case AMDGPU::AGPR_LO16RegClassID: 1875 return 16; 1876 case AMDGPU::SGPR_32RegClassID: 1877 case AMDGPU::VGPR_32RegClassID: 1878 case AMDGPU::VRegOrLds_32RegClassID: 1879 case AMDGPU::AGPR_32RegClassID: 1880 case AMDGPU::VS_32RegClassID: 1881 case AMDGPU::AV_32RegClassID: 1882 case AMDGPU::SReg_32RegClassID: 1883 case AMDGPU::SReg_32_XM0RegClassID: 1884 case AMDGPU::SRegOrLds_32RegClassID: 1885 return 32; 1886 case AMDGPU::SGPR_64RegClassID: 1887 case AMDGPU::VS_64RegClassID: 1888 case AMDGPU::SReg_64RegClassID: 1889 case AMDGPU::VReg_64RegClassID: 1890 case AMDGPU::AReg_64RegClassID: 1891 case AMDGPU::SReg_64_XEXECRegClassID: 1892 case AMDGPU::VReg_64_Align2RegClassID: 1893 case AMDGPU::AReg_64_Align2RegClassID: 1894 case AMDGPU::AV_64RegClassID: 1895 case AMDGPU::AV_64_Align2RegClassID: 1896 return 64; 1897 case AMDGPU::SGPR_96RegClassID: 1898 case AMDGPU::SReg_96RegClassID: 1899 case AMDGPU::VReg_96RegClassID: 1900 case AMDGPU::AReg_96RegClassID: 1901 case AMDGPU::VReg_96_Align2RegClassID: 1902 case AMDGPU::AReg_96_Align2RegClassID: 1903 case AMDGPU::AV_96RegClassID: 1904 case AMDGPU::AV_96_Align2RegClassID: 1905 return 96; 1906 case AMDGPU::SGPR_128RegClassID: 1907 case AMDGPU::SReg_128RegClassID: 1908 case AMDGPU::VReg_128RegClassID: 1909 case AMDGPU::AReg_128RegClassID: 1910 case AMDGPU::VReg_128_Align2RegClassID: 1911 case AMDGPU::AReg_128_Align2RegClassID: 1912 case AMDGPU::AV_128RegClassID: 1913 case AMDGPU::AV_128_Align2RegClassID: 1914 return 128; 1915 case AMDGPU::SGPR_160RegClassID: 1916 case AMDGPU::SReg_160RegClassID: 1917 case AMDGPU::VReg_160RegClassID: 1918 case AMDGPU::AReg_160RegClassID: 1919 case AMDGPU::VReg_160_Align2RegClassID: 1920 case AMDGPU::AReg_160_Align2RegClassID: 1921 case AMDGPU::AV_160RegClassID: 1922 case AMDGPU::AV_160_Align2RegClassID: 1923 return 160; 1924 case AMDGPU::SGPR_192RegClassID: 1925 case AMDGPU::SReg_192RegClassID: 1926 case AMDGPU::VReg_192RegClassID: 1927 case AMDGPU::AReg_192RegClassID: 1928 case AMDGPU::VReg_192_Align2RegClassID: 1929 case AMDGPU::AReg_192_Align2RegClassID: 1930 case AMDGPU::AV_192RegClassID: 1931 case AMDGPU::AV_192_Align2RegClassID: 1932 return 192; 1933 case AMDGPU::SGPR_224RegClassID: 1934 case AMDGPU::SReg_224RegClassID: 1935 case AMDGPU::VReg_224RegClassID: 1936 case AMDGPU::AReg_224RegClassID: 1937 case AMDGPU::VReg_224_Align2RegClassID: 1938 case AMDGPU::AReg_224_Align2RegClassID: 1939 case AMDGPU::AV_224RegClassID: 1940 case AMDGPU::AV_224_Align2RegClassID: 1941 return 224; 1942 case AMDGPU::SGPR_256RegClassID: 1943 case AMDGPU::SReg_256RegClassID: 1944 case AMDGPU::VReg_256RegClassID: 1945 case AMDGPU::AReg_256RegClassID: 1946 case AMDGPU::VReg_256_Align2RegClassID: 1947 case AMDGPU::AReg_256_Align2RegClassID: 1948 case AMDGPU::AV_256RegClassID: 1949 case AMDGPU::AV_256_Align2RegClassID: 1950 return 256; 1951 case AMDGPU::SGPR_512RegClassID: 1952 case AMDGPU::SReg_512RegClassID: 1953 case AMDGPU::VReg_512RegClassID: 1954 case AMDGPU::AReg_512RegClassID: 1955 case AMDGPU::VReg_512_Align2RegClassID: 1956 case AMDGPU::AReg_512_Align2RegClassID: 1957 case AMDGPU::AV_512RegClassID: 1958 case AMDGPU::AV_512_Align2RegClassID: 1959 return 512; 1960 case AMDGPU::SGPR_1024RegClassID: 1961 case AMDGPU::SReg_1024RegClassID: 1962 case AMDGPU::VReg_1024RegClassID: 1963 case AMDGPU::AReg_1024RegClassID: 1964 case AMDGPU::VReg_1024_Align2RegClassID: 1965 case AMDGPU::AReg_1024_Align2RegClassID: 1966 case AMDGPU::AV_1024RegClassID: 1967 case AMDGPU::AV_1024_Align2RegClassID: 1968 return 1024; 1969 default: 1970 llvm_unreachable("Unexpected register class"); 1971 } 1972 } 1973 1974 unsigned getRegBitWidth(const MCRegisterClass &RC) { 1975 return getRegBitWidth(RC.getID()); 1976 } 1977 1978 unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc, 1979 unsigned OpNo) { 1980 assert(OpNo < Desc.NumOperands); 1981 unsigned RCID = Desc.OpInfo[OpNo].RegClass; 1982 return getRegBitWidth(MRI->getRegClass(RCID)) / 8; 1983 } 1984 1985 bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) { 1986 if (isInlinableIntLiteral(Literal)) 1987 return true; 1988 1989 uint64_t Val = static_cast<uint64_t>(Literal); 1990 return (Val == DoubleToBits(0.0)) || 1991 (Val == DoubleToBits(1.0)) || 1992 (Val == DoubleToBits(-1.0)) || 1993 (Val == DoubleToBits(0.5)) || 1994 (Val == DoubleToBits(-0.5)) || 1995 (Val == DoubleToBits(2.0)) || 1996 (Val == DoubleToBits(-2.0)) || 1997 (Val == DoubleToBits(4.0)) || 1998 (Val == DoubleToBits(-4.0)) || 1999 (Val == 0x3fc45f306dc9c882 && HasInv2Pi); 2000 } 2001 2002 bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) { 2003 if (isInlinableIntLiteral(Literal)) 2004 return true; 2005 2006 // The actual type of the operand does not seem to matter as long 2007 // as the bits match one of the inline immediate values. For example: 2008 // 2009 // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal, 2010 // so it is a legal inline immediate. 2011 // 2012 // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in 2013 // floating-point, so it is a legal inline immediate. 2014 2015 uint32_t Val = static_cast<uint32_t>(Literal); 2016 return (Val == FloatToBits(0.0f)) || 2017 (Val == FloatToBits(1.0f)) || 2018 (Val == FloatToBits(-1.0f)) || 2019 (Val == FloatToBits(0.5f)) || 2020 (Val == FloatToBits(-0.5f)) || 2021 (Val == FloatToBits(2.0f)) || 2022 (Val == FloatToBits(-2.0f)) || 2023 (Val == FloatToBits(4.0f)) || 2024 (Val == FloatToBits(-4.0f)) || 2025 (Val == 0x3e22f983 && HasInv2Pi); 2026 } 2027 2028 bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) { 2029 if (!HasInv2Pi) 2030 return false; 2031 2032 if (isInlinableIntLiteral(Literal)) 2033 return true; 2034 2035 uint16_t Val = static_cast<uint16_t>(Literal); 2036 return Val == 0x3C00 || // 1.0 2037 Val == 0xBC00 || // -1.0 2038 Val == 0x3800 || // 0.5 2039 Val == 0xB800 || // -0.5 2040 Val == 0x4000 || // 2.0 2041 Val == 0xC000 || // -2.0 2042 Val == 0x4400 || // 4.0 2043 Val == 0xC400 || // -4.0 2044 Val == 0x3118; // 1/2pi 2045 } 2046 2047 bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) { 2048 assert(HasInv2Pi); 2049 2050 if (isInt<16>(Literal) || isUInt<16>(Literal)) { 2051 int16_t Trunc = static_cast<int16_t>(Literal); 2052 return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi); 2053 } 2054 if (!(Literal & 0xffff)) 2055 return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi); 2056 2057 int16_t Lo16 = static_cast<int16_t>(Literal); 2058 int16_t Hi16 = static_cast<int16_t>(Literal >> 16); 2059 return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi); 2060 } 2061 2062 bool isInlinableIntLiteralV216(int32_t Literal) { 2063 int16_t Lo16 = static_cast<int16_t>(Literal); 2064 if (isInt<16>(Literal) || isUInt<16>(Literal)) 2065 return isInlinableIntLiteral(Lo16); 2066 2067 int16_t Hi16 = static_cast<int16_t>(Literal >> 16); 2068 if (!(Literal & 0xffff)) 2069 return isInlinableIntLiteral(Hi16); 2070 return Lo16 == Hi16 && isInlinableIntLiteral(Lo16); 2071 } 2072 2073 bool isFoldableLiteralV216(int32_t Literal, bool HasInv2Pi) { 2074 assert(HasInv2Pi); 2075 2076 int16_t Lo16 = static_cast<int16_t>(Literal); 2077 if (isInt<16>(Literal) || isUInt<16>(Literal)) 2078 return true; 2079 2080 int16_t Hi16 = static_cast<int16_t>(Literal >> 16); 2081 if (!(Literal & 0xffff)) 2082 return true; 2083 return Lo16 == Hi16; 2084 } 2085 2086 bool isArgPassedInSGPR(const Argument *A) { 2087 const Function *F = A->getParent(); 2088 2089 // Arguments to compute shaders are never a source of divergence. 2090 CallingConv::ID CC = F->getCallingConv(); 2091 switch (CC) { 2092 case CallingConv::AMDGPU_KERNEL: 2093 case CallingConv::SPIR_KERNEL: 2094 return true; 2095 case CallingConv::AMDGPU_VS: 2096 case CallingConv::AMDGPU_LS: 2097 case CallingConv::AMDGPU_HS: 2098 case CallingConv::AMDGPU_ES: 2099 case CallingConv::AMDGPU_GS: 2100 case CallingConv::AMDGPU_PS: 2101 case CallingConv::AMDGPU_CS: 2102 case CallingConv::AMDGPU_Gfx: 2103 // For non-compute shaders, SGPR inputs are marked with either inreg or byval. 2104 // Everything else is in VGPRs. 2105 return F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::InReg) || 2106 F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::ByVal); 2107 default: 2108 // TODO: Should calls support inreg for SGPR inputs? 2109 return false; 2110 } 2111 } 2112 2113 static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) { 2114 return isGCN3Encoding(ST) || isGFX10Plus(ST); 2115 } 2116 2117 static bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST) { 2118 return isGFX9Plus(ST); 2119 } 2120 2121 bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST, 2122 int64_t EncodedOffset) { 2123 return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset) 2124 : isUInt<8>(EncodedOffset); 2125 } 2126 2127 bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST, 2128 int64_t EncodedOffset, 2129 bool IsBuffer) { 2130 return !IsBuffer && 2131 hasSMRDSignedImmOffset(ST) && 2132 isInt<21>(EncodedOffset); 2133 } 2134 2135 static bool isDwordAligned(uint64_t ByteOffset) { 2136 return (ByteOffset & 3) == 0; 2137 } 2138 2139 uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST, 2140 uint64_t ByteOffset) { 2141 if (hasSMEMByteOffset(ST)) 2142 return ByteOffset; 2143 2144 assert(isDwordAligned(ByteOffset)); 2145 return ByteOffset >> 2; 2146 } 2147 2148 Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST, 2149 int64_t ByteOffset, bool IsBuffer) { 2150 // The signed version is always a byte offset. 2151 if (!IsBuffer && hasSMRDSignedImmOffset(ST)) { 2152 assert(hasSMEMByteOffset(ST)); 2153 return isInt<20>(ByteOffset) ? Optional<int64_t>(ByteOffset) : None; 2154 } 2155 2156 if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST)) 2157 return None; 2158 2159 int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset); 2160 return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset) 2161 ? Optional<int64_t>(EncodedOffset) 2162 : None; 2163 } 2164 2165 Optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST, 2166 int64_t ByteOffset) { 2167 if (!isCI(ST) || !isDwordAligned(ByteOffset)) 2168 return None; 2169 2170 int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset); 2171 return isUInt<32>(EncodedOffset) ? Optional<int64_t>(EncodedOffset) : None; 2172 } 2173 2174 unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST, bool Signed) { 2175 // Address offset is 12-bit signed for GFX10, 13-bit for GFX9. 2176 if (AMDGPU::isGFX10(ST)) 2177 return Signed ? 12 : 11; 2178 2179 return Signed ? 13 : 12; 2180 } 2181 2182 // Given Imm, split it into the values to put into the SOffset and ImmOffset 2183 // fields in an MUBUF instruction. Return false if it is not possible (due to a 2184 // hardware bug needing a workaround). 2185 // 2186 // The required alignment ensures that individual address components remain 2187 // aligned if they are aligned to begin with. It also ensures that additional 2188 // offsets within the given alignment can be added to the resulting ImmOffset. 2189 bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset, 2190 const GCNSubtarget *Subtarget, Align Alignment) { 2191 const uint32_t MaxImm = alignDown(4095, Alignment.value()); 2192 uint32_t Overflow = 0; 2193 2194 if (Imm > MaxImm) { 2195 if (Imm <= MaxImm + 64) { 2196 // Use an SOffset inline constant for 4..64 2197 Overflow = Imm - MaxImm; 2198 Imm = MaxImm; 2199 } else { 2200 // Try to keep the same value in SOffset for adjacent loads, so that 2201 // the corresponding register contents can be re-used. 2202 // 2203 // Load values with all low-bits (except for alignment bits) set into 2204 // SOffset, so that a larger range of values can be covered using 2205 // s_movk_i32. 2206 // 2207 // Atomic operations fail to work correctly when individual address 2208 // components are unaligned, even if their sum is aligned. 2209 uint32_t High = (Imm + Alignment.value()) & ~4095; 2210 uint32_t Low = (Imm + Alignment.value()) & 4095; 2211 Imm = Low; 2212 Overflow = High - Alignment.value(); 2213 } 2214 } 2215 2216 // There is a hardware bug in SI and CI which prevents address clamping in 2217 // MUBUF instructions from working correctly with SOffsets. The immediate 2218 // offset is unaffected. 2219 if (Overflow > 0 && 2220 Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS) 2221 return false; 2222 2223 ImmOffset = Imm; 2224 SOffset = Overflow; 2225 return true; 2226 } 2227 2228 SIModeRegisterDefaults::SIModeRegisterDefaults(const Function &F) { 2229 *this = getDefaultForCallingConv(F.getCallingConv()); 2230 2231 StringRef IEEEAttr = F.getFnAttribute("amdgpu-ieee").getValueAsString(); 2232 if (!IEEEAttr.empty()) 2233 IEEE = IEEEAttr == "true"; 2234 2235 StringRef DX10ClampAttr 2236 = F.getFnAttribute("amdgpu-dx10-clamp").getValueAsString(); 2237 if (!DX10ClampAttr.empty()) 2238 DX10Clamp = DX10ClampAttr == "true"; 2239 2240 StringRef DenormF32Attr = F.getFnAttribute("denormal-fp-math-f32").getValueAsString(); 2241 if (!DenormF32Attr.empty()) { 2242 DenormalMode DenormMode = parseDenormalFPAttribute(DenormF32Attr); 2243 FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE; 2244 FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE; 2245 } 2246 2247 StringRef DenormAttr = F.getFnAttribute("denormal-fp-math").getValueAsString(); 2248 if (!DenormAttr.empty()) { 2249 DenormalMode DenormMode = parseDenormalFPAttribute(DenormAttr); 2250 2251 if (DenormF32Attr.empty()) { 2252 FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE; 2253 FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE; 2254 } 2255 2256 FP64FP16InputDenormals = DenormMode.Input == DenormalMode::IEEE; 2257 FP64FP16OutputDenormals = DenormMode.Output == DenormalMode::IEEE; 2258 } 2259 } 2260 2261 namespace { 2262 2263 struct SourceOfDivergence { 2264 unsigned Intr; 2265 }; 2266 const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr); 2267 2268 #define GET_SourcesOfDivergence_IMPL 2269 #define GET_Gfx9BufferFormat_IMPL 2270 #define GET_Gfx10BufferFormat_IMPL 2271 #define GET_Gfx11PlusBufferFormat_IMPL 2272 #include "AMDGPUGenSearchableTables.inc" 2273 2274 } // end anonymous namespace 2275 2276 bool isIntrinsicSourceOfDivergence(unsigned IntrID) { 2277 return lookupSourceOfDivergence(IntrID); 2278 } 2279 2280 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp, 2281 uint8_t NumComponents, 2282 uint8_t NumFormat, 2283 const MCSubtargetInfo &STI) { 2284 return isGFX11Plus(STI) 2285 ? getGfx11PlusBufferFormatInfo(BitsPerComp, NumComponents, 2286 NumFormat) 2287 : isGFX10(STI) ? getGfx10BufferFormatInfo(BitsPerComp, 2288 NumComponents, NumFormat) 2289 : getGfx9BufferFormatInfo(BitsPerComp, 2290 NumComponents, NumFormat); 2291 } 2292 2293 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format, 2294 const MCSubtargetInfo &STI) { 2295 return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(Format) 2296 : isGFX10(STI) ? getGfx10BufferFormatInfo(Format) 2297 : getGfx9BufferFormatInfo(Format); 2298 } 2299 2300 } // namespace AMDGPU 2301 2302 raw_ostream &operator<<(raw_ostream &OS, 2303 const AMDGPU::IsaInfo::TargetIDSetting S) { 2304 switch (S) { 2305 case (AMDGPU::IsaInfo::TargetIDSetting::Unsupported): 2306 OS << "Unsupported"; 2307 break; 2308 case (AMDGPU::IsaInfo::TargetIDSetting::Any): 2309 OS << "Any"; 2310 break; 2311 case (AMDGPU::IsaInfo::TargetIDSetting::Off): 2312 OS << "Off"; 2313 break; 2314 case (AMDGPU::IsaInfo::TargetIDSetting::On): 2315 OS << "On"; 2316 break; 2317 } 2318 return OS; 2319 } 2320 2321 } // namespace llvm 2322