//===-- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"

#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/TargetRegistry.h"


using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

typedef llvm::MCDisassembler::DecodeStatus DecodeStatus;


inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

#define DECODE_OPERAND2(RegClass, DecName) \
static DecodeStatus Decode##RegClass##RegisterClass(MCInst &Inst, \
                                                    unsigned Imm, \
                                                    uint64_t /*Addr*/, \
                                                    const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->decodeOperand_##DecName(Imm)); \
}

#define DECODE_OPERAND(RegClass) DECODE_OPERAND2(RegClass, RegClass)

DECODE_OPERAND(VGPR_32)
DECODE_OPERAND(VS_32)
DECODE_OPERAND(VS_64)

DECODE_OPERAND(VReg_64)
DECODE_OPERAND(VReg_96)
DECODE_OPERAND(VReg_128)

DECODE_OPERAND(SReg_32)
DECODE_OPERAND(SReg_32_XM0_XEXEC)
DECODE_OPERAND(SReg_64)
DECODE_OPERAND(SReg_64_XEXEC)
DECODE_OPERAND(SReg_128)
DECODE_OPERAND(SReg_256)
DECODE_OPERAND(SReg_512)


static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

#define GET_SUBTARGETINFO_ENUM
#include "AMDGPUGenSubtargetInfo.inc"
#undef GET_SUBTARGETINFO_ENUM

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

// Consume sizeof(T) bytes from the front of Bytes and return them as a
// little-endian value of type T.
template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

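// Try to decode Inst against one TableGen'd decoder table. On failure the
// partially filled MCInst is discarded and the Bytes stream is restored, so
// the caller can retry with another table.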
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  assert(AMDGPU::isVI(STI) && "Can disassemble only VI ISA.");

  const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: it would be better to switch encoding length using some bit
    // predicate, but it is not known yet, so try everything we can.

    // Try to decode DPP and SDWA first to solve the conflict with the VOP1
    // and VOP2 encodings.
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) break;
    }

    // Reinitialize Bytes as DPP64 could have eaten too much.
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try to decode a 32-bit instruction.
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    // Fall back to a 64-bit encoding, using the next dword as the high half.
    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
  } while (false);

  // Report how many bytes were consumed; zero on failure.
  Size = Res ? (MaxInstBytesNum - Bytes.size()) : 0;
  return Res;
}

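// Return the printable name of an MC register class; used only for the
// diagnostics written to the comment stream below.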
const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(RegId);
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI has 102.
  // Valery: here we accept as much as we can; let the assembler sort it out.
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

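// The VReg_* decoders below map the encoded value directly onto a register
// class; the VS_* and VSrc16 decoders above go through decodeSrcOp so that
// inline constants and literals are also accepted.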
MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // The TableGen-generated disassembler doesn't care about operand types;
  // only the register class is kept, so an SSrc_32 operand turns into
  // SReg_32, and therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI.
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_256RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_512RegClassID, Val);
}


MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are supposed to be unsigned integers.
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (Bytes.size() < 4)
    return errOperand(0, "cannot read literal, inst bytes left " +
                         Twine(Bytes.size()));
  return MCOperand::createImm(eatBytes<uint32_t>(Bytes));
}

MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;
  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  // The cast prevents negative overflow.
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
}

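// The helpers below return the bit patterns of the floating-point inline
// constants +-0.5, +-1.0, +-2.0, +-4.0 and 1/(2*pi) for 32-, 64- and 16-bit
// operands (encodings 240-248).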
static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  }
}

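// Decode a 9-bit source operand encoding: VGPRs first, then SGPRs, trap
// temporaries, inline integer and floating-point constants, the literal
// constant marker, and finally the special registers handled further below.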
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width,
                                          unsigned Val) const {
  using namespace AMDGPU::EncValues;
  assert(Val < 512); // enum9

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }
  if (TTMP_MIN <= Val && Val <= TTMP_MAX) {
    return createSRegOperand(getTtmpClassId(Width), Val - TTMP_MIN);
  }

  assert(Width == OPW16 || Width == OPW32 || Width == OPW64);

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR_LO, STI));
  case 103: return createRegOperand(getMCReg(FLAT_SCR_HI, STI));
  // ToDo: no support for xnack_mask_lo/_hi register
  case 104:
  case 105: break;
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  // ToDo: no support for vccz register
  case 251: break;
  // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR, STI));
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find a symbol name for the specified label.
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  typedef std::tuple<uint64_t, StringRef, uint8_t> SymbolInfoTy;
  typedef std::vector<SymbolInfoTy> SectionSymbolsTy;

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
    [Value](const SymbolInfoTy& Val) {
      return std::get<0>(Val) == static_cast<uint64_t>(Value)
          && std::get<2>(Val) == ELF::STT_NOTYPE;
    });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

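// Annotating PC-relative load references is not implemented for AMDGPU.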
void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx);
}

extern "C" void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}