//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUBaseInfo.h"
#include "SIDefines.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

#define GET_SUBTARGETINFO_ENUM
#include "AMDGPUGenSubtargetInfo.inc"
#undef GET_SUBTARGETINFO_ENUM

#define GET_REGINFO_ENUM
#include "AMDGPUGenRegisterInfo.inc"
#undef GET_REGINFO_ENUM

#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRINFO_ENUM
#include "AMDGPUGenInstrInfo.inc"
#undef GET_INSTRINFO_NAMED_OPS
#undef GET_INSTRINFO_ENUM

namespace {

/// \returns Bit mask for given bit \p Shift and bit \p Width.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

/// \brief Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  Dst &= ~(1 << Shift) & ~getBitMask(Shift, Width);
  Dst |= (Src << Shift) & getBitMask(Shift, Width);
  return Dst;
}

/// \brief Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}
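
// Example (illustrative): with Shift = 4 and Width = 3, getBitMask(4, 3)
// yields 0x70, so
//   packBits(5, 0xff, 4, 3) == 0xdf
// stores the 3-bit field value 5 at bits [6:4] of Dst, and the round trip
//   unpackBits(0xdf, 4, 3) == 5
// recovers the field.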

/// \returns Vmcnt bit shift (lower bits).
unsigned getVmcntBitShiftLo() { return 0; }

/// \returns Vmcnt bit width (lower bits).
unsigned getVmcntBitWidthLo() { return 4; }

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift() { return 4; }

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth() { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift() { return 8; }

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth() { return 4; }

/// \returns Vmcnt bit shift (higher bits).
unsigned getVmcntBitShiftHi() { return 14; }

/// \returns Vmcnt bit width (higher bits).
unsigned getVmcntBitWidthHi() { return 2; }

} // end anonymous namespace

namespace llvm {
namespace AMDGPU {

namespace IsaInfo {

IsaVersion getIsaVersion(const FeatureBitset &Features) {
  // CI.
  if (Features.test(FeatureISAVersion7_0_0))
    return {7, 0, 0};
  if (Features.test(FeatureISAVersion7_0_1))
    return {7, 0, 1};
  if (Features.test(FeatureISAVersion7_0_2))
    return {7, 0, 2};

  // VI.
  if (Features.test(FeatureISAVersion8_0_0))
    return {8, 0, 0};
  if (Features.test(FeatureISAVersion8_0_1))
    return {8, 0, 1};
  if (Features.test(FeatureISAVersion8_0_2))
    return {8, 0, 2};
  if (Features.test(FeatureISAVersion8_0_3))
    return {8, 0, 3};
  if (Features.test(FeatureISAVersion8_0_4))
    return {8, 0, 4};
  if (Features.test(FeatureISAVersion8_1_0))
    return {8, 1, 0};

  // GFX9.
  if (Features.test(FeatureISAVersion9_0_0))
    return {9, 0, 0};
  if (Features.test(FeatureISAVersion9_0_1))
    return {9, 0, 1};

  if (!Features.test(FeatureGCN) || Features.test(FeatureSouthernIslands))
    return {0, 0, 0};
  return {7, 0, 0};
}

unsigned getWavefrontSize(const FeatureBitset &Features) {
  if (Features.test(FeatureWavefrontSize16))
    return 16;
  if (Features.test(FeatureWavefrontSize32))
    return 32;

  return 64;
}

unsigned getLocalMemorySize(const FeatureBitset &Features) {
  if (Features.test(FeatureLocalMemorySize32768))
    return 32768;
  if (Features.test(FeatureLocalMemorySize65536))
    return 65536;

  return 0;
}

unsigned getEUsPerCU(const FeatureBitset &Features) {
  return 4;
}

unsigned getMaxWorkGroupsPerCU(const FeatureBitset &Features,
                               unsigned FlatWorkGroupSize) {
  if (!Features.test(FeatureGCN))
    return 8;
  unsigned N = getWavesPerWorkGroup(Features, FlatWorkGroupSize);
  if (N == 1)
    return 40;
  N = 40 / N;
  return std::min(N, 16u);
}

unsigned getMaxWavesPerCU(const FeatureBitset &Features) {
  return getMaxWavesPerEU(Features) * getEUsPerCU(Features);
}

unsigned getMaxWavesPerCU(const FeatureBitset &Features,
                          unsigned FlatWorkGroupSize) {
  return getWavesPerWorkGroup(Features, FlatWorkGroupSize);
}
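
// Example (illustrative): with a 64-wide wavefront, a flat work-group size of
// 256 occupies getWavesPerWorkGroup(Features, 256) == 4 waves, so on a GCN
// target getMaxWorkGroupsPerCU returns std::min(40 / 4, 16u) == 10
// work-groups per compute unit.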

unsigned getMinWavesPerEU(const FeatureBitset &Features) {
  return 1;
}

unsigned getMaxWavesPerEU(const FeatureBitset &Features) {
  if (!Features.test(FeatureGCN))
    return 8;
  // FIXME: Need to take scratch memory into account.
  return 10;
}

unsigned getMaxWavesPerEU(const FeatureBitset &Features,
                          unsigned FlatWorkGroupSize) {
  return alignTo(getMaxWavesPerCU(Features, FlatWorkGroupSize),
                 getEUsPerCU(Features)) / getEUsPerCU(Features);
}

unsigned getMinFlatWorkGroupSize(const FeatureBitset &Features) {
  return 1;
}

unsigned getMaxFlatWorkGroupSize(const FeatureBitset &Features) {
  return 2048;
}

unsigned getWavesPerWorkGroup(const FeatureBitset &Features,
                              unsigned FlatWorkGroupSize) {
  return alignTo(FlatWorkGroupSize, getWavefrontSize(Features)) /
         getWavefrontSize(Features);
}

unsigned getSGPRAllocGranule(const FeatureBitset &Features) {
  IsaVersion Version = getIsaVersion(Features);
  if (Version.Major >= 8)
    return 16;
  return 8;
}

unsigned getSGPREncodingGranule(const FeatureBitset &Features) {
  return 8;
}

unsigned getTotalNumSGPRs(const FeatureBitset &Features) {
  IsaVersion Version = getIsaVersion(Features);
  if (Version.Major >= 8)
    return 800;
  return 512;
}

unsigned getAddressableNumSGPRs(const FeatureBitset &Features) {
  if (Features.test(FeatureSGPRInitBug))
    return FIXED_NUM_SGPRS_FOR_INIT_BUG;

  IsaVersion Version = getIsaVersion(Features);
  if (Version.Major >= 8)
    return 102;
  return 104;
}

unsigned getMinNumSGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU(Features))
    return 0;
  unsigned MinNumSGPRs =
      alignDown(getTotalNumSGPRs(Features) / (WavesPerEU + 1),
                getSGPRAllocGranule(Features)) + 1;
  return std::min(MinNumSGPRs, getAddressableNumSGPRs(Features));
}

unsigned getMaxNumSGPRs(const FeatureBitset &Features, unsigned WavesPerEU,
                        bool Addressable) {
  assert(WavesPerEU != 0);

  IsaVersion Version = getIsaVersion(Features);
  unsigned MaxNumSGPRs = alignDown(getTotalNumSGPRs(Features) / WavesPerEU,
                                   getSGPRAllocGranule(Features));
  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(Features);
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
}

unsigned getVGPRAllocGranule(const FeatureBitset &Features) {
  return 4;
}

unsigned getVGPREncodingGranule(const FeatureBitset &Features) {
  return getVGPRAllocGranule(Features);
}

unsigned getTotalNumVGPRs(const FeatureBitset &Features) {
  return 256;
}

unsigned getAddressableNumVGPRs(const FeatureBitset &Features) {
  return getTotalNumVGPRs(Features);
}

unsigned getMinNumVGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU(Features))
    return 0;
  unsigned MinNumVGPRs =
      alignDown(getTotalNumVGPRs(Features) / (WavesPerEU + 1),
                getVGPRAllocGranule(Features)) + 1;
  return std::min(MinNumVGPRs, getAddressableNumVGPRs(Features));
}

unsigned getMaxNumVGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(Features) / WavesPerEU,
                                   getVGPRAllocGranule(Features));
  unsigned AddressableNumVGPRs = getAddressableNumVGPRs(Features);
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
}
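
// Example (illustrative): on a Version.Major >= 8 target without
// FeatureSGPRInitBug, getTotalNumSGPRs == 800 and the allocation granule is
// 16, so for WavesPerEU == 8:
//   getMinNumSGPRs(Features, 8)       == alignDown(800 / 9, 16) + 1 == 81
//   getMaxNumSGPRs(Features, 8, true) == alignDown(800 / 8, 16)     == 96
// i.e. a kernel using 81..96 SGPRs sustains exactly 8 waves per EU.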

} // end namespace IsaInfo

void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
                               const FeatureBitset &Features) {
  IsaInfo::IsaVersion ISA = IsaInfo::getIsaVersion(Features);

  memset(&Header, 0, sizeof(Header));

  Header.amd_kernel_code_version_major = 1;
  Header.amd_kernel_code_version_minor = 1;
  Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
  Header.amd_machine_version_major = ISA.Major;
  Header.amd_machine_version_minor = ISA.Minor;
  Header.amd_machine_version_stepping = ISA.Stepping;
  Header.kernel_code_entry_byte_offset = sizeof(Header);
  // wavefront_size is specified as a power of 2: 2^6 = 64 threads.
  Header.wavefront_size = 6;

  // If the code object does not support indirect functions, then the value
  // must be 0xffffffff.
  Header.call_convention = -1;

  // These alignment values are specified in powers of two, so alignment =
  // 2^n. The minimum alignment is 2^4 = 16.
  Header.kernarg_segment_alignment = 4;
  Header.group_segment_alignment = 4;
  Header.private_segment_alignment = 4;
}

MCSection *getHSATextSection(MCContext &Ctx) {
  return Ctx.getELFSection(".hsatext", ELF::SHT_PROGBITS,
                           ELF::SHF_ALLOC | ELF::SHF_WRITE |
                           ELF::SHF_EXECINSTR |
                           ELF::SHF_AMDGPU_HSA_AGENT |
                           ELF::SHF_AMDGPU_HSA_CODE);
}

MCSection *getHSADataGlobalAgentSection(MCContext &Ctx) {
  return Ctx.getELFSection(".hsadata_global_agent", ELF::SHT_PROGBITS,
                           ELF::SHF_ALLOC | ELF::SHF_WRITE |
                           ELF::SHF_AMDGPU_HSA_GLOBAL |
                           ELF::SHF_AMDGPU_HSA_AGENT);
}

MCSection *getHSADataGlobalProgramSection(MCContext &Ctx) {
  return Ctx.getELFSection(".hsadata_global_program", ELF::SHT_PROGBITS,
                           ELF::SHF_ALLOC | ELF::SHF_WRITE |
                           ELF::SHF_AMDGPU_HSA_GLOBAL);
}

MCSection *getHSARodataReadonlyAgentSection(MCContext &Ctx) {
  return Ctx.getELFSection(".hsarodata_readonly_agent", ELF::SHT_PROGBITS,
                           ELF::SHF_ALLOC | ELF::SHF_AMDGPU_HSA_READONLY |
                           ELF::SHF_AMDGPU_HSA_AGENT);
}

bool isGroupSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}

bool isGlobalSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}

bool isReadOnlySegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS;
}

bool shouldEmitConstantsToTextSection(const Triple &TT) {
  return TT.getOS() != Triple::AMDHSA;
}

int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
  Attribute A = F.getFnAttribute(Name);
  int Result = Default;

  if (A.isStringAttribute()) {
    StringRef Str = A.getValueAsString();
    if (Str.getAsInteger(0, Result)) {
      LLVMContext &Ctx = F.getContext();
      Ctx.emitError("can't parse integer attribute " + Name);
    }
  }

  return Result;
}

std::pair<int, int> getIntegerPairAttribute(const Function &F,
                                            StringRef Name,
                                            std::pair<int, int> Default,
                                            bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return Default;

  LLVMContext &Ctx = F.getContext();
  std::pair<int, int> Ints = Default;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return Default;
  }
  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return Default;
    }
  }

  return Ints;
}
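
// Example (illustrative; the attribute name is the one used elsewhere in the
// AMDGPU backend): a function carrying the string attribute
//   "amdgpu-flat-work-group-size"="128,256"
// is parsed by getIntegerPairAttribute into the pair {128, 256}. With
// OnlyFirstRequired set, a value such as "128" is also accepted; a malformed
// value emits a diagnostic and falls back to Default.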

unsigned getVmcntBitMask(const IsaInfo::IsaVersion &Version) {
  unsigned VmcntLo = (1 << getVmcntBitWidthLo()) - 1;
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi = ((1 << getVmcntBitWidthHi()) - 1) << getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned getExpcntBitMask(const IsaInfo::IsaVersion &Version) {
  return (1 << getExpcntBitWidth()) - 1;
}

unsigned getLgkmcntBitMask(const IsaInfo::IsaVersion &Version) {
  return (1 << getLgkmcntBitWidth()) - 1;
}

unsigned getWaitcntBitMask(const IsaInfo::IsaVersion &Version) {
  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(), getVmcntBitWidthLo());
  unsigned Expcnt = getBitMask(getExpcntBitShift(), getExpcntBitWidth());
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(), getLgkmcntBitWidth());
  unsigned Waitcnt = VmcntLo | Expcnt | Lgkmcnt;
  if (Version.Major < 9)
    return Waitcnt;

  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(), getVmcntBitWidthHi());
  return Waitcnt | VmcntHi;
}

unsigned decodeVmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
  unsigned VmcntLo =
      unpackBits(Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi =
      unpackBits(Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
  VmcntHi <<= getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned decodeExpcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned decodeLgkmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(), getLgkmcntBitWidth());
}

void decodeWaitcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                   unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}

unsigned encodeVmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt) {
  Waitcnt =
      packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return Waitcnt;

  Vmcnt >>= getVmcntBitWidthLo();
  return packBits(Vmcnt, Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
}

unsigned encodeExpcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned encodeLgkmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(), getLgkmcntBitWidth());
}

unsigned encodeWaitcnt(const IsaInfo::IsaVersion &Version,
                       unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}
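
// Example (illustrative): for Version.Major < 9 the fields live at
// vmcnt[3:0], expcnt[6:4] and lgkmcnt[11:8], so
//   encodeWaitcnt(Version, /*Vmcnt=*/3, /*Expcnt=*/2, /*Lgkmcnt=*/5) == 0x523.
// On GFX9 (Version.Major >= 9) vmcnt gains two high bits at [15:14]; encoding
// Vmcnt == 35 (0b100011) with the same expcnt/lgkmcnt yields 0x8523, and
// decodeWaitcnt recovers the original triple.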

unsigned getInitialPSInputAddr(const Function &F) {
  return getIntegerAttribute(F, "InitialPSInputAddr", 0);
}

bool isShader(CallingConv::ID cc) {
  switch(cc) {
    case CallingConv::AMDGPU_VS:
    case CallingConv::AMDGPU_GS:
    case CallingConv::AMDGPU_PS:
    case CallingConv::AMDGPU_CS:
      return true;
    default:
      return false;
  }
}

bool isCompute(CallingConv::ID cc) {
  return !isShader(cc) || cc == CallingConv::AMDGPU_CS;
}

bool isSI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
}

bool isCI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
}

bool isVI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
  switch(Reg) {
  default: break;
  case AMDGPU::FLAT_SCR:
    assert(!isSI(STI));
    return isCI(STI) ? AMDGPU::FLAT_SCR_ci : AMDGPU::FLAT_SCR_vi;

  case AMDGPU::FLAT_SCR_LO:
    assert(!isSI(STI));
    return isCI(STI) ? AMDGPU::FLAT_SCR_LO_ci : AMDGPU::FLAT_SCR_LO_vi;

  case AMDGPU::FLAT_SCR_HI:
    assert(!isSI(STI));
    return isCI(STI) ? AMDGPU::FLAT_SCR_HI_ci : AMDGPU::FLAT_SCR_HI_vi;
  }
  return Reg;
}

bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}

bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  switch (OpType) {
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
    return true;
  default:
    return false;
  }
}

bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}
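
// Note (illustrative): the two range checks above rely on the OperandType
// enum in SIDefines.h keeping all source-operand kinds contiguous between
// OPERAND_SRC_FIRST and OPERAND_SRC_LAST (and the inline-constant kinds
// between OPERAND_REG_INLINE_C_FIRST and OPERAND_REG_INLINE_C_LAST), so a
// single pair of comparisons classifies an operand.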

// Avoid using MCRegisterClass::getSize, since that function will go away
// (move from MC* level to Target* level). Return size in bits.
unsigned getRegBitWidth(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
    return 32;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
    return 64;
  case AMDGPU::VReg_96RegClassID:
    return 96;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
    return 128;
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
    return 256;
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
    return 512;
  default:
    llvm_unreachable("Unexpected register class");
  }
}

unsigned getRegBitWidth(const MCRegisterClass &RC) {
  return getRegBitWidth(RC.getID());
}

unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                           unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
}

bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  uint64_t Val = static_cast<uint64_t>(Literal);
  return (Val == DoubleToBits(0.0)) ||
         (Val == DoubleToBits(1.0)) ||
         (Val == DoubleToBits(-1.0)) ||
         (Val == DoubleToBits(0.5)) ||
         (Val == DoubleToBits(-0.5)) ||
         (Val == DoubleToBits(2.0)) ||
         (Val == DoubleToBits(-2.0)) ||
         (Val == DoubleToBits(4.0)) ||
         (Val == DoubleToBits(-4.0)) ||
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
}
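
// Example (illustrative): the integer 64 is inlinable directly, and
// 0x3FF0000000000000 (the IEEE-754 encoding of 1.0) matches
// DoubleToBits(1.0), so both are accepted; 65 matches neither test and would
// need a literal constant slot instead.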

bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  uint32_t Val = static_cast<uint32_t>(Literal);
  return (Val == FloatToBits(0.0f)) ||
         (Val == FloatToBits(1.0f)) ||
         (Val == FloatToBits(-1.0f)) ||
         (Val == FloatToBits(0.5f)) ||
         (Val == FloatToBits(-0.5f)) ||
         (Val == FloatToBits(2.0f)) ||
         (Val == FloatToBits(-2.0f)) ||
         (Val == FloatToBits(4.0f)) ||
         (Val == FloatToBits(-4.0f)) ||
         (Val == 0x3e22f983 && HasInv2Pi);
}

bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;

  if (Literal >= -16 && Literal <= 64)
    return true;

  uint16_t Val = static_cast<uint16_t>(Literal);
  return Val == 0x3C00 || // 1.0
         Val == 0xBC00 || // -1.0
         Val == 0x3800 || // 0.5
         Val == 0xB800 || // -0.5
         Val == 0x4000 || // 2.0
         Val == 0xC000 || // -2.0
         Val == 0x4400 || // 4.0
         Val == 0xC400 || // -4.0
         Val == 0x3118;   // 1/2pi
}

bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
}

bool isUniformMMO(const MachineMemOperand *MMO) {
  const Value *Ptr = MMO->getValue();
  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

int64_t getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  if (isSI(ST) || isCI(ST))
    return ByteOffset >> 2;

  return ByteOffset;
}

bool isLegalSMRDImmOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  int64_t EncodedOffset = getSMRDEncodedOffset(ST, ByteOffset);
  return isSI(ST) || isCI(ST) ? isUInt<8>(EncodedOffset) :
                                isUInt<20>(EncodedOffset);
}

} // end namespace AMDGPU
} // end namespace llvm