//===-- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"

#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

typedef llvm::MCDisassembler::DecodeStatus DecodeStatus;

inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                uint16_t NameIdx) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
  if (OpIdx != -1) {
    auto I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}

static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  // SOPP branches take a signed 16-bit word offset; the target is
  // PC + 4 + simm16 * 4, so 18 bits are enough for the byte offset.
  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                      unsigned Imm, \
                                      uint64_t /*Addr*/, \
                                      const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)

DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)
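// For reference, each DECODE_OPERAND_REG(RegClass) above expands to a small
// trampoline that the table-gen'erated decoder tables call by name. A sketch
// of the expansion for VGPR_32 (a mechanical application of the two macros):
//
//   static DecodeStatus DecodeVGPR_32RegisterClass(MCInst &Inst,
//                                                  unsigned Imm,
//                                                  uint64_t /*Addr*/,
//                                                  const void *Decoder) {
//     auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
//     return addOperand(Inst, DAsm->decodeOperand_VGPR_32(Imm));
//   }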
static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res =
      support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}
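// eatBytes() reads little-endian and advances the Bytes view, which is why a
// failed decode attempt must restore Bytes (see tryDecodeInst above and the
// reinitialization in getInstruction below). A worked example: the byte
// stream { 0x01, 0x03, 0x00, 0x7E } yields the 32-bit word 0x7E000301 when
// consumed as a little-endian uint32_t.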
DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;
  bool IsSDWA = false;

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding])
    report_fatal_error("Disassembly not yet supported for subtarget");

  const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: it would be better to select the encoding length with some bit
    // predicate, but none is known yet, so try everything we can.

    // Try to decode DPP and SDWA first to resolve the conflict with the VOP1
    // and VOP2 encodings.
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }
    }

    // Reinitialize Bytes as DPP64 could have eaten too much.
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try to decode a 32-bit instruction.
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
  } while (false);

  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi)) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);

  Size = Res ? (MaxInstBytesNum - Bytes.size()) : 0;
  return Res;
}

DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, MCOperand::createReg(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(RegId);
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                      ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}
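// createSRegOperand() below converts an encoded scalar base register number
// into a register-class index by dividing out the tuple alignment. For
// example, the 64-bit pair s[2:3] is encoded with Val = 2 and maps to index
// Val >> 1 == 1 in the SGPR_64 class; an odd Val for a 64-bit class indicates
// a misaligned pair, which only produces a disassembly comment, not a
// failure.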
inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we accept as much as we can; let the assembler sort it out
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // The table-gen'erated disassembler doesn't care about operand types: it
  // keeps only the register class, so the SSrc_32 operand turns into SReg_32,
  // and therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_256RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_512RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are supposed to be unsigned integers.
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}
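// decodeIntImmed() below inverts the GCN inline-integer operand encoding.
// Assuming the usual GCN3 EncValues constants (INLINE_INTEGER_C_MIN = 128,
// INLINE_INTEGER_C_POSITIVE_MAX = 192, INLINE_INTEGER_C_MAX = 208), the
// mapping is:
//   128          ->  0
//   129 ... 192  ->  1 ... 64
//   193 ... 208  -> -1 ... -16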
MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;
  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
    // Cast prevents negative overflow.
}

static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}
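// The 16-bit values returned by getInlineImmVal16() above are the IEEE 754
// half-precision bit patterns of the corresponding constants: e.g. 1.0 has
// sign 0, biased exponent 01111 and a zero mantissa, i.e. 0x3C00, and 0.5
// differs only in the exponent (01110), i.e. 0x3800. The odd one out, 0x3118,
// is the hardware's 1/(2*pi) constant.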
unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64:
    return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  }
}
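// decodeSrcOp() below dispatches on the 9-bit source operand encoding.
// Assuming the usual GCN3 EncValues ranges, the layout is roughly:
//   0   .. 101  SGPRs
//   102 .. 127  special registers (FLAT_SCR, VCC, M0, EXEC, ...; see
//               decodeSpecialReg32/64), with TTMPs at 112 .. 123
//   128 .. 208  inline integer constants
//   240 .. 248  inline floating-point constants
//   255         literal constant (the next dword in the instruction stream)
//   256 .. 511  VGPRs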
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width,
                                          unsigned Val) const {
  using namespace AMDGPU::EncValues;
  assert(Val < 512); // enum9

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes a
                           // compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }
  if (TTMP_MIN <= Val && Val <= TTMP_MAX) {
    return createSRegOperand(getTtmpClassId(Width), Val - TTMP_MIN);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR_LO, STI));
  case 103: return createRegOperand(getMCReg(FLAT_SCR_HI, STI));
  // ToDo: no support for xnack_mask_lo/_hi register
  case 104:
  case 105: break;
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  // TODO: SRC_POPS_EXITING_WAVE_ID
  // ToDo: no support for vccz register
  case 251: break;
  // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR, STI));
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
                                            unsigned Val) const {
  using namespace AMDGPU::SDWA;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    // XXX: static_cast<int> is needed to avoid a spurious "comparison with
    // unsigned is always true" warning.
    if (SDWA9EncValues::SRC_VGPR_MIN <= static_cast<int>(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
      return createRegOperand(getVgprClassId(Width),
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    }
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_SGPR_MAX) {
      return createSRegOperand(getSgprClassId(Width),
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    }

    return decodeSpecialReg32(Val - SDWA9EncValues::SRC_SGPR_MIN);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    return createRegOperand(getVgprClassId(Width), Val);
  }
  llvm_unreachable("unsupported target");
}

MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
  using namespace AMDGPU::SDWA;

  assert(STI.getFeatureBits()[AMDGPU::FeatureGFX9] &&
         "SDWAVopcDst should be present only on GFX9");
  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    if (Val > AMDGPU::EncValues::SGPR_MAX) {
      return decodeSpecialReg64(Val);
    } else {
      return createSRegOperand(getSgprClassId(OPW64), Val);
    }
  } else {
    return createRegOperand(AMDGPU::VCC);
  }
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find a symbol name for the specified label.
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  typedef std::tuple<uint64_t, StringRef, uint8_t> SymbolInfoTy;
  typedef std::vector<SymbolInfoTy> SectionSymbolsTy;

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                               return std::get<0>(Val)
                                          == static_cast<uint64_t>(Value)
                                   && std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx);
}

extern "C" void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}
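// A quick way to exercise this disassembler from the command line is
// llvm-mc's -disassemble mode (a sketch; the exact bytes below are our own
// hand-encoding of VOP1 v_mov_b32 with src0 = v1 on VI, 0x7E000301, and
// should be double-checked against the ISA manual):
//
//   $ echo "0x01 0x03 0x00 0x7e" | \
//       llvm-mc -arch=amdgcn -mcpu=tonga -disassemble
//   v_mov_b32_e32 v0, v1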