//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUBaseInfo.h"
#include "AMDGPUTargetTransformInfo.h"
#include "AMDGPU.h"
#include "SIDefines.h"
#include "AMDGPUAsmUtils.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"
#undef GET_INSTRMAP_INFO
#undef GET_INSTRINFO_NAMED_OPS

namespace {

/// \returns Bit mask for given bit \p Shift and bit \p Width.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

/// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  Dst &= ~(1 << Shift) & ~getBitMask(Shift, Width);
  Dst |= (Src << Shift) & getBitMask(Shift, Width);
  return Dst;
}

/// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}

/// \returns Vmcnt bit shift (lower bits).
unsigned getVmcntBitShiftLo() { return 0; }

/// \returns Vmcnt bit width (lower bits).
unsigned getVmcntBitWidthLo() { return 4; }

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift() { return 4; }

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth() { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift() { return 8; }

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
  return (VersionMajor >= 10) ? 6 : 4;
}

/// \returns Vmcnt bit shift (higher bits).
unsigned getVmcntBitShiftHi() { return 14; }

/// \returns Vmcnt bit width (higher bits).
unsigned getVmcntBitWidthHi() { return 2; }

} // end namespace anonymous
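// Illustrative layout of the fields packed by the helpers above (widths in
// bits; the split high VM_CNT field exists only on gfx9+, and LGKM_CNT
// widens from 4 to 6 bits on gfx10):
//
//   [3:0]   VM_CNT   (low bits)
//   [6:4]   EXP_CNT
//   [13:8]  LGKM_CNT (gfx10; [11:8] before gfx10)
//   [15:14] VM_CNT   (high bits, gfx9+)
//
// For example, packBits(/*Src=*/0x2, /*Dst=*/0, /*Shift=*/14, /*Width=*/2)
// places the two high bits of a 6-bit vmcnt value into bits [15:14].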
namespace llvm {

namespace AMDGPU {

#define GET_MIMGBaseOpcodesTable_IMPL
#define GET_MIMGDimInfoTable_IMPL
#define GET_MIMGInfoTable_IMPL
#define GET_MIMGLZMappingTable_IMPL
#define GET_MIMGMIPMappingTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
                  unsigned VDataDwords, unsigned VAddrDwords) {
  const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding,
                                             VDataDwords, VAddrDwords);
  return Info ? Info->Opcode : -1;
}

const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
  const MIMGInfo *Info = getMIMGInfo(Opc);
  return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
}

int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
  const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
  const MIMGInfo *NewInfo =
      getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
                          NewChannels, OrigInfo->VAddrDwords);
  return NewInfo ? NewInfo->Opcode : -1;
}

struct MUBUFInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t dwords;
  bool has_vaddr;
  bool has_srsrc;
  bool has_soffset;
};

#define GET_MUBUFInfoTable_DECL
#define GET_MUBUFInfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMUBUFBaseOpcode(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMUBUFOpcode(unsigned BaseOpc, unsigned Dwords) {
  const MUBUFInfo *Info = getMUBUFInfoFromBaseOpcodeAndDwords(BaseOpc, Dwords);
  return Info ? Info->Opcode : -1;
}

int getMUBUFDwords(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->dwords : 0;
}

bool getMUBUFHasVAddr(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_vaddr : false;
}

bool getMUBUFHasSrsrc(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_srsrc : false;
}

bool getMUBUFHasSoffset(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_soffset : false;
}

// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}

namespace IsaInfo {

void streamIsaVersion(const MCSubtargetInfo *STI, raw_ostream &Stream) {
  auto TargetTriple = STI->getTargetTriple();
  auto Version = getIsaVersion(STI->getCPU());

  Stream << TargetTriple.getArchName() << '-'
         << TargetTriple.getVendorName() << '-'
         << TargetTriple.getOSName() << '-'
         << TargetTriple.getEnvironmentName() << '-'
         << "gfx"
         << Version.Major
         << Version.Minor
         << Version.Stepping;

  if (hasXNACK(*STI))
    Stream << "+xnack";
  if (hasSRAMECC(*STI))
    Stream << "+sram-ecc";

  Stream.flush();
}
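// For example, an amdgcn-amd-amdhsa target at gfx906 with XNACK enabled
// streams the string "amdgcn-amd-amdhsa--gfx906+xnack" (the empty
// environment component leaves two consecutive dashes).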
bool hasCodeObjectV3(const MCSubtargetInfo *STI) {
  return STI->getTargetTriple().getOS() == Triple::AMDHSA &&
         STI->getFeatureBits().test(FeatureCodeObjectV3);
}

unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureWavefrontSize16))
    return 16;
  if (STI->getFeatureBits().test(FeatureWavefrontSize32))
    return 32;

  return 64;
}

unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureLocalMemorySize32768))
    return 32768;
  if (STI->getFeatureBits().test(FeatureLocalMemorySize65536))
    return 65536;

  return 0;
}

unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
  return 4;
}

unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
                               unsigned FlatWorkGroupSize) {
  assert(FlatWorkGroupSize != 0);
  if (STI->getTargetTriple().getArch() != Triple::amdgcn)
    return 8;
  unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
  if (N == 1)
    return 40;
  N = 40 / N;
  return std::min(N, 16u);
}

unsigned getMaxWavesPerCU(const MCSubtargetInfo *STI) {
  return getMaxWavesPerEU() * getEUsPerCU(STI);
}

unsigned getMaxWavesPerCU(const MCSubtargetInfo *STI,
                          unsigned FlatWorkGroupSize) {
  return getWavesPerWorkGroup(STI, FlatWorkGroupSize);
}

unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxWavesPerEU() {
  // FIXME: Need to take scratch memory into account.
  return 10;
}

unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI,
                          unsigned FlatWorkGroupSize) {
  return alignTo(getMaxWavesPerCU(STI, FlatWorkGroupSize),
                 getEUsPerCU(STI)) / getEUsPerCU(STI);
}

unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  return 2048;
}

unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
                              unsigned FlatWorkGroupSize) {
  return alignTo(FlatWorkGroupSize, getWavefrontSize(STI)) /
         getWavefrontSize(STI);
}
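// Worked example: with a 64-wide wavefront, FlatWorkGroupSize == 256 gives
// getWavesPerWorkGroup == 4, so getMaxWorkGroupsPerCU returns
// std::min(40 / 4, 16u) == 10 and getMaxWavesPerEU(STI, 256) == 1.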
unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return getAddressableNumSGPRs(STI);
  if (Version.Major >= 8)
    return 16;
  return 8;
}

unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) {
  return 8;
}

unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 8)
    return 800;
  return 512;
}

unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureSGPRInitBug))
    return FIXED_NUM_SGPRS_FOR_INIT_BUG;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 106;
  if (Version.Major >= 8)
    return 102;
  return 104;
}

unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 0;

  if (WavesPerEU >= getMaxWavesPerEU())
    return 0;

  unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
  return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
}

unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
                        bool Addressable) {
  assert(WavesPerEU != 0);

  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return Addressable ? AddressableNumSGPRs : 108;
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
}
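// Worked example: on a gfx8 target (800 total SGPRs, allocation granule 16,
// 102 addressable) with no trap handler,
// getMaxNumSGPRs(STI, /*WavesPerEU=*/8, /*Addressable=*/true) returns
// std::min(alignDown(800 / 8, 16), 102) == 96.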
unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed) {
  unsigned ExtraSGPRs = 0;
  if (VCCUsed)
    ExtraSGPRs = 2;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return ExtraSGPRs;

  if (Version.Major < 8) {
    if (FlatScrUsed)
      ExtraSGPRs = 4;
  } else {
    if (XNACKUsed)
      ExtraSGPRs = 4;

    if (FlatScrUsed)
      ExtraSGPRs = 6;
  }

  return ExtraSGPRs;
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed) {
  return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
                          STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
}

unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
  NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(STI));
  // SGPRBlocks is actual number of SGPR blocks minus 1.
  return NumSGPRs / getSGPREncodingGranule(STI) - 1;
}
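// For example, NumSGPRs == 30 rounds up to the 8-SGPR encoding granule (32)
// and encodes as 32 / 8 - 1 == 3 SGPR blocks.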
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
                             Optional<bool> EnableWavefrontSize32) {
  bool IsWave32 = EnableWavefrontSize32 ?
      *EnableWavefrontSize32 :
      STI->getFeatureBits().test(FeatureWavefrontSize32);
  return IsWave32 ? 8 : 4;
}

unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
                                Optional<bool> EnableWavefrontSize32) {
  return getVGPRAllocGranule(STI, EnableWavefrontSize32);
}

unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
  return 256;
}

unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI) {
  return getTotalNumVGPRs(STI);
}

unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU())
    return 0;
  unsigned MinNumVGPRs =
      alignDown(getTotalNumVGPRs(STI) / (WavesPerEU + 1),
                getVGPRAllocGranule(STI)) + 1;
  return std::min(MinNumVGPRs, getAddressableNumVGPRs(STI));
}

unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
                                   getVGPRAllocGranule(STI));
  unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI);
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
}

unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
                          Optional<bool> EnableWavefrontSize32) {
  NumVGPRs = alignTo(std::max(1u, NumVGPRs),
                     getVGPREncodingGranule(STI, EnableWavefrontSize32));
  // VGPRBlocks is actual number of VGPR blocks minus 1.
  return NumVGPRs / getVGPREncodingGranule(STI, EnableWavefrontSize32) - 1;
}
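// For example, in wave64 mode the VGPR granule is 4, so NumVGPRs == 17
// rounds up to 20 and encodes as 20 / 4 - 1 == 4 VGPR blocks; in wave32
// mode the granule is 8, giving 24 / 8 - 1 == 2.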
} // end namespace IsaInfo

void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
                               const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  memset(&Header, 0, sizeof(Header));

  Header.amd_kernel_code_version_major = 1;
  Header.amd_kernel_code_version_minor = 2;
  Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
  Header.amd_machine_version_major = Version.Major;
  Header.amd_machine_version_minor = Version.Minor;
  Header.amd_machine_version_stepping = Version.Stepping;
  Header.kernel_code_entry_byte_offset = sizeof(Header);
  Header.wavefront_size = 6;

  // If the code object does not support indirect functions, then the value must
  // be 0xffffffff.
  Header.call_convention = -1;

  // These alignment values are specified in powers of two, so alignment =
  // 2^n. The minimum alignment is 2^4 = 16.
  Header.kernarg_segment_alignment = 4;
  Header.group_segment_alignment = 4;
  Header.private_segment_alignment = 4;

  if (Version.Major >= 10) {
    if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
      Header.wavefront_size = 5;
      Header.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
    }
    Header.compute_pgm_resource_registers |=
        S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
        S_00B848_MEM_ORDERED(1);
  }
}

amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(
    const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  amdhsa::kernel_descriptor_t KD;
  memset(&KD, 0, sizeof(KD));

  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
                  amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc2,
                  amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
  if (Version.Major >= 10) {
    AMDHSA_BITS_SET(KD.kernel_code_properties,
                    amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
                    STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1 : 0);
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE,
                    STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1);
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED, 1);
  }
  return KD;
}

bool isGroupSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}

bool isGlobalSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}

bool isReadOnlySegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
         GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
}

bool shouldEmitConstantsToTextSection(const Triple &TT) {
  return TT.getOS() != Triple::AMDHSA;
}

int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
  Attribute A = F.getFnAttribute(Name);
  int Result = Default;

  if (A.isStringAttribute()) {
    StringRef Str = A.getValueAsString();
    if (Str.getAsInteger(0, Result)) {
      LLVMContext &Ctx = F.getContext();
      Ctx.emitError("can't parse integer attribute " + Name);
    }
  }

  return Result;
}

std::pair<int, int> getIntegerPairAttribute(const Function &F,
                                            StringRef Name,
                                            std::pair<int, int> Default,
                                            bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return Default;

  LLVMContext &Ctx = F.getContext();
  std::pair<int, int> Ints = Default;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return Default;
  }
  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return Default;
    }
  }

  return Ints;
}
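// For example, an attribute string of the form "1,256" (as used by
// attributes such as "amdgpu-flat-work-group-size") parses to {1, 256};
// with OnlyFirstRequired set, "1" alone is also accepted and the second
// value keeps its default.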
unsigned getVmcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = (1 << getVmcntBitWidthLo()) - 1;
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi = ((1 << getVmcntBitWidthHi()) - 1) << getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned getExpcntBitMask(const IsaVersion &Version) {
  return (1 << getExpcntBitWidth()) - 1;
}

unsigned getLgkmcntBitMask(const IsaVersion &Version) {
  return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
}

unsigned getWaitcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(), getVmcntBitWidthLo());
  unsigned Expcnt = getBitMask(getExpcntBitShift(), getExpcntBitWidth());
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(),
                                getLgkmcntBitWidth(Version.Major));
  unsigned Waitcnt = VmcntLo | Expcnt | Lgkmcnt;
  if (Version.Major < 9)
    return Waitcnt;

  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(), getVmcntBitWidthHi());
  return Waitcnt | VmcntHi;
}

unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  unsigned VmcntLo =
      unpackBits(Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi =
      unpackBits(Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
  VmcntHi <<= getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(),
                    getLgkmcntBitWidth(Version.Major));
}

void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt,
                   unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}

Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
  Waitcnt Decoded;
  Decoded.VmCnt = decodeVmcnt(Version, Encoded);
  Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
  Decoded.LgkmCnt = decodeLgkmcnt(Version, Encoded);
  return Decoded;
}

unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt) {
  Waitcnt =
      packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return Waitcnt;

  Vmcnt >>= getVmcntBitWidthLo();
  return packBits(Vmcnt, Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
}

unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(),
                  getLgkmcntBitWidth(Version.Major));
}

unsigned encodeWaitcnt(const IsaVersion &Version,
                       unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}

unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
  return encodeWaitcnt(Version, Decoded.VmCnt, Decoded.ExpCnt, Decoded.LgkmCnt);
}
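// Round-trip example for gfx9, where vmcnt is 6 bits split across the
// encoding: encodeWaitcnt(Version, /*Vmcnt=*/42, /*Expcnt=*/1,
// /*Lgkmcnt=*/2) packs the low nibble of 42 into bits [3:0] and its two
// high bits into bits [15:14]; decodeWaitcnt on the result yields
// {VmCnt = 42, ExpCnt = 1, LgkmCnt = 2} again.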
//===----------------------------------------------------------------------===//
// hwreg
//===----------------------------------------------------------------------===//

namespace Hwreg {

int64_t getHwregId(const StringRef Name) {
  for (int Id = ID_SYMBOLIC_FIRST_; Id < ID_SYMBOLIC_LAST_; ++Id) {
    if (IdSymbolic[Id] && Name == IdSymbolic[Id])
      return Id;
  }
  return ID_UNKNOWN_;
}

static unsigned getLastSymbolicHwreg(const MCSubtargetInfo &STI) {
  if (isSI(STI) || isCI(STI) || isVI(STI))
    return ID_SYMBOLIC_FIRST_GFX9_;
  else if (isGFX9(STI))
    return ID_SYMBOLIC_FIRST_GFX10_;
  else
    return ID_SYMBOLIC_LAST_;
}

bool isValidHwreg(int64_t Id, const MCSubtargetInfo &STI) {
  return ID_SYMBOLIC_FIRST_ <= Id && Id < getLastSymbolicHwreg(STI) &&
         IdSymbolic[Id];
}

bool isValidHwreg(int64_t Id) {
  return 0 <= Id && isUInt<ID_WIDTH_>(Id);
}

bool isValidHwregOffset(int64_t Offset) {
  return 0 <= Offset && isUInt<OFFSET_WIDTH_>(Offset);
}

bool isValidHwregWidth(int64_t Width) {
  return 0 <= (Width - 1) && isUInt<WIDTH_M1_WIDTH_>(Width - 1);
}

int64_t encodeHwreg(int64_t Id, int64_t Offset, int64_t Width) {
  return (Id << ID_SHIFT_) |
         (Offset << OFFSET_SHIFT_) |
         ((Width - 1) << WIDTH_M1_SHIFT_);
}

StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) {
  return isValidHwreg(Id, STI) ? IdSymbolic[Id] : "";
}

void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width) {
  Id = (Val & ID_MASK_) >> ID_SHIFT_;
  Offset = (Val & OFFSET_MASK_) >> OFFSET_SHIFT_;
  Width = ((Val & WIDTH_M1_MASK_) >> WIDTH_M1_SHIFT_) + 1;
}
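// For example, encodeHwreg(Id, /*Offset=*/0, /*Width=*/32) places Id at
// ID_SHIFT_, the offset at OFFSET_SHIFT_, and Width - 1 == 31 at
// WIDTH_M1_SHIFT_; decodeHwreg reverses the transformation.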
} // namespace Hwreg

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

unsigned getInitialPSInputAddr(const Function &F) {
  return getIntegerAttribute(F, "InitialPSInputAddr", 0);
}

bool isShader(CallingConv::ID cc) {
  switch(cc) {
    case CallingConv::AMDGPU_VS:
    case CallingConv::AMDGPU_LS:
    case CallingConv::AMDGPU_HS:
    case CallingConv::AMDGPU_ES:
    case CallingConv::AMDGPU_GS:
    case CallingConv::AMDGPU_PS:
    case CallingConv::AMDGPU_CS:
      return true;
    default:
      return false;
  }
}

bool isCompute(CallingConv::ID cc) {
  return !isShader(cc) || cc == CallingConv::AMDGPU_CS;
}

bool isEntryFunctionCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_LS:
    return true;
  default:
    return false;
  }
}

bool hasXNACK(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureXNACK];
}

bool hasSRAMECC(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC];
}

bool hasMIMG_R128(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128];
}

bool hasPackedD16(const MCSubtargetInfo &STI) {
  return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem];
}

bool isSI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
}

bool isCI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
}

bool isVI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool isGFX9(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

bool isGFX10(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
}

bool isGCN3Encoding(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding];
}

bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) {
  const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
  const unsigned FirstSubReg = TRI->getSubReg(Reg, 1);
  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
    Reg == AMDGPU::SCC;
}

bool isRegIntersect(unsigned Reg0, unsigned Reg1, const MCRegisterInfo* TRI) {
  for (MCRegAliasIterator R(Reg0, TRI, true); R.isValid(); ++R) {
    if (*R == Reg1) return true;
  }
  return false;
}

#define MAP_REG2REG \
  using namespace AMDGPU; \
  switch(Reg) { \
  default: return Reg; \
  CASE_CI_VI(FLAT_SCR) \
  CASE_CI_VI(FLAT_SCR_LO) \
  CASE_CI_VI(FLAT_SCR_HI) \
  CASE_VI_GFX9_GFX10(TTMP0) \
  CASE_VI_GFX9_GFX10(TTMP1) \
  CASE_VI_GFX9_GFX10(TTMP2) \
  CASE_VI_GFX9_GFX10(TTMP3) \
  CASE_VI_GFX9_GFX10(TTMP4) \
  CASE_VI_GFX9_GFX10(TTMP5) \
  CASE_VI_GFX9_GFX10(TTMP6) \
  CASE_VI_GFX9_GFX10(TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP8) \
  CASE_VI_GFX9_GFX10(TTMP9) \
  CASE_VI_GFX9_GFX10(TTMP10) \
  CASE_VI_GFX9_GFX10(TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP12) \
  CASE_VI_GFX9_GFX10(TTMP13) \
  CASE_VI_GFX9_GFX10(TTMP14) \
  CASE_VI_GFX9_GFX10(TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1) \
  CASE_VI_GFX9_GFX10(TTMP2_TTMP3) \
  CASE_VI_GFX9_GFX10(TTMP4_TTMP5) \
  CASE_VI_GFX9_GFX10(TTMP6_TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP8_TTMP9) \
  CASE_VI_GFX9_GFX10(TTMP10_TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP12_TTMP13) \
  CASE_VI_GFX9_GFX10(TTMP14_TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1_TTMP2_TTMP3) \
  CASE_VI_GFX9_GFX10(TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  }

#define CASE_CI_VI(node) \
  assert(!isSI(STI)); \
  case node: return isCI(STI) ? node##_ci : node##_vi;

#define CASE_VI_GFX9_GFX10(node) \
  case node: return (isGFX9(STI) || isGFX10(STI)) ? node##_gfx9_gfx10 : node##_vi;

unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
  if (STI.getTargetTriple().getArch() == Triple::r600)
    return Reg;
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9_GFX10

#define CASE_CI_VI(node) case node##_ci: case node##_vi: return node;
#define CASE_VI_GFX9_GFX10(node) case node##_vi: case node##_gfx9_gfx10: return node;

unsigned mc2PseudoReg(unsigned Reg) {
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9_GFX10
#undef MAP_REG2REG

bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}

bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  switch (OpType) {
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    return true;
  default:
    return false;
  }
}

bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}

// Avoid using MCRegisterClass::getSize, since that function will go away
// (move from MC* level to Target* level). Return size in bits.
unsigned getRegBitWidth(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VRegOrLds_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
  case AMDGPU::SRegOrLds_32RegClassID:
    return 32;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
  case AMDGPU::SReg_64_XEXECRegClassID:
    return 64;
  case AMDGPU::SGPR_96RegClassID:
  case AMDGPU::SReg_96RegClassID:
  case AMDGPU::VReg_96RegClassID:
    return 96;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
    return 128;
  case AMDGPU::SGPR_160RegClassID:
  case AMDGPU::SReg_160RegClassID:
  case AMDGPU::VReg_160RegClassID:
    return 160;
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
    return 256;
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
    return 512;
  default:
    llvm_unreachable("Unexpected register class");
  }
}

unsigned getRegBitWidth(const MCRegisterClass &RC) {
  return getRegBitWidth(RC.getID());
}

unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                           unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
}

bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  uint64_t Val = static_cast<uint64_t>(Literal);
  return (Val == DoubleToBits(0.0)) ||
         (Val == DoubleToBits(1.0)) ||
         (Val == DoubleToBits(-1.0)) ||
         (Val == DoubleToBits(0.5)) ||
         (Val == DoubleToBits(-0.5)) ||
         (Val == DoubleToBits(2.0)) ||
         (Val == DoubleToBits(-2.0)) ||
         (Val == DoubleToBits(4.0)) ||
         (Val == DoubleToBits(-4.0)) ||
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
}

bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  uint32_t Val = static_cast<uint32_t>(Literal);
  return (Val == FloatToBits(0.0f)) ||
         (Val == FloatToBits(1.0f)) ||
         (Val == FloatToBits(-1.0f)) ||
         (Val == FloatToBits(0.5f)) ||
         (Val == FloatToBits(-0.5f)) ||
         (Val == FloatToBits(2.0f)) ||
         (Val == FloatToBits(-2.0f)) ||
         (Val == FloatToBits(4.0f)) ||
         (Val == FloatToBits(-4.0f)) ||
         (Val == 0x3e22f983 && HasInv2Pi);
}

bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;

  if (Literal >= -16 && Literal <= 64)
    return true;

  uint16_t Val = static_cast<uint16_t>(Literal);
  return Val == 0x3C00 || // 1.0
         Val == 0xBC00 || // -1.0
         Val == 0x3800 || // 0.5
         Val == 0xB800 || // -0.5
         Val == 0x4000 || // 2.0
         Val == 0xC000 || // -2.0
         Val == 0x4400 || // 4.0
         Val == 0xC400 || // -4.0
         Val == 0x3118;   // 1/2pi
}

bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  if (isInt<16>(Literal) || isUInt<16>(Literal)) {
    int16_t Trunc = static_cast<int16_t>(Literal);
    return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi);
  }
  if (!(Literal & 0xffff))
    return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
}
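// For example, 0x3C003C00 (1.0 in both f16 halves) is an inlinable v2f16
// literal because both 16-bit halves match the same inline immediate,
// whereas 0x3C004000 (1.0 and 2.0 in the two halves) is not.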
bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    // For non-compute shaders, SGPR inputs are marked with either inreg or
    // byval. Everything else is in VGPRs.
    return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
           F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
  return isGCN3Encoding(ST) || isGFX10(ST);
}

int64_t getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  if (hasSMEMByteOffset(ST))
    return ByteOffset;
  return ByteOffset >> 2;
}

bool isLegalSMRDImmOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  int64_t EncodedOffset = getSMRDEncodedOffset(ST, ByteOffset);
  return (hasSMEMByteOffset(ST)) ?
      isUInt<20>(EncodedOffset) : isUInt<8>(EncodedOffset);
}
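// For example, on SI/CI the SMRD offset is encoded in dwords: a byte offset
// of 400 encodes as 100 and must fit in 8 bits. With GCN3 encoding or on
// GFX10 the byte offset is used directly and must fit in 20 bits.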
// Given Imm, split it into the values to put into the SOffset and ImmOffset
// fields in an MUBUF instruction. Return false if it is not possible (due to a
// hardware bug needing a workaround).
//
// The required alignment ensures that individual address components remain
// aligned if they are aligned to begin with. It also ensures that additional
// offsets within the given alignment can be added to the resulting ImmOffset.
bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset,
                      const GCNSubtarget *Subtarget, uint32_t Align) {
  const uint32_t MaxImm = alignDown(4095, Align);
  uint32_t Overflow = 0;

  if (Imm > MaxImm) {
    if (Imm <= MaxImm + 64) {
      // Use an SOffset inline constant for 4..64
      Overflow = Imm - MaxImm;
      Imm = MaxImm;
    } else {
      // Try to keep the same value in SOffset for adjacent loads, so that
      // the corresponding register contents can be re-used.
      //
      // Load values with all low-bits (except for alignment bits) set into
      // SOffset, so that a larger range of values can be covered using
      // s_movk_i32.
      //
      // Atomic operations fail to work correctly when individual address
      // components are unaligned, even if their sum is aligned.
      uint32_t High = (Imm + Align) & ~4095;
      uint32_t Low = (Imm + Align) & 4095;
      Imm = Low;
      Overflow = High - Align;
    }
  }

  // There is a hardware bug in SI and CI which prevents address clamping in
  // MUBUF instructions from working correctly with SOffsets. The immediate
  // offset is unaffected.
  if (Overflow > 0 &&
      Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
    return false;

  ImmOffset = Imm;
  SOffset = Overflow;
  return true;
}
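// Worked example: Imm == 5000 with Align == 4 gives
// MaxImm == alignDown(4095, 4) == 4092. Since 5000 > 4092 + 64, the else
// branch applies: High == ((5000 + 4) & ~4095) == 4096 and
// Low == ((5000 + 4) & 4095) == 908, so ImmOffset == 908 and
// SOffset == 4096 - 4 == 4092 (and 908 + 4092 == 5000).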
SIModeRegisterDefaults::SIModeRegisterDefaults(const Function &F) {
  *this = getDefaultForCallingConv(F.getCallingConv());

  StringRef IEEEAttr = F.getFnAttribute("amdgpu-ieee").getValueAsString();
  if (!IEEEAttr.empty())
    IEEE = IEEEAttr == "true";

  StringRef DX10ClampAttr
    = F.getFnAttribute("amdgpu-dx10-clamp").getValueAsString();
  if (!DX10ClampAttr.empty())
    DX10Clamp = DX10ClampAttr == "true";
}

namespace {

struct SourceOfDivergence {
  unsigned Intr;
};
const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);

#define GET_SourcesOfDivergence_IMPL
#include "AMDGPUGenSearchableTables.inc"

} // end anonymous namespace

bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
  return lookupSourceOfDivergence(IntrID);
}

} // namespace AMDGPU
} // namespace llvm