//===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains definition for AMDGPU ISA disassembler
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "Disassembler/AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm-c/Disassembler.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

using DecodeStatus = llvm::MCDisassembler::DecodeStatus;

// Append \p Opnd to \p Inst unconditionally, reporting SoftFail (operand
// could not be decoded, but keep going) when the operand is invalid.
inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

// Insert \p Op into \p MI at the slot of the named operand \p NameIdx
// (an AMDGPU::OpName value). Returns the operand index used, or -1 when the
// opcode has no such named operand; in that case MI is left unchanged.
static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                uint16_t NameIdx) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
  if (OpIdx != -1) {
    auto I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}

// Decode the 16-bit SOPP branch offset. The encoded immediate counts dwords
// relative to the instruction *after* the branch, so the byte offset is
// Imm * 4 + 4 from the branch address.
static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  // Imm * 4 needs 18 bits to hold a scaled 16-bit signed value before the
  // sign-extension to 64 bits below.
  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  // No symbol matched the target: emit the raw immediate instead.
  return addOperand(Inst, MCOperand::createImm(Imm));
}

// Wrap an AMDGPUDisassembler::DecoderName member function in the static
// free-function signature expected by the table-generated decoder.
#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                      unsigned Imm, \
                                      uint64_t /*Addr*/, \
                                      const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

// Shorthand for register-class operand decoders following the
// Decode<RC>RegisterClass / decodeOperand_<RC> naming convention.
#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)

// Vector registers and vector/scalar source operands.
DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

// Scalar registers of various widths and restricted subclasses.
DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)

// 16-bit vector/scalar source operand (f16/i16 encodings).
static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

// Packed 2x16-bit vector/scalar source operand (v2f16/v2i16 encodings).
static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

// SDWA operand decoders share the member-function name with the static
// wrapper, so only one name is supplied.
#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

// Read a little-endian value of type T from the front of \p Bytes and shrink
// the ArrayRef past it. Caller must ensure enough bytes remain.
template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

// Attempt to decode \p Inst against one table-generated decoder table.
// On failure the Bytes view is restored, since an operand decoder may have
// consumed a trailing literal constant before the match was rejected.
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

// Main entry point: decode one instruction from \p Bytes_ at \p Address.
// Tries the 64-bit DPP/SDWA tables first (they conflict with VOP1/VOP2
// dword encodings), then the 32-bit tables, then the remaining 64-bit
// tables, applying post-decode fixups (MAC src2_modifiers, MIMG register
// widening, SDWA operand insertion) on success.
DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;
  bool IsSDWA = false;

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding])
    report_fatal_error("Disassembly not yet supported for subtarget");

  // Longest encoding is 8 bytes (64-bit opcode word, or 32-bit opcode word
  // plus a 32-bit literal).
  const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: better to switch encoding length using some bit predicate
    // but it is unknown yet, so try all we can

    // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
    // encodings
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) { IsSDWA = true;  break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) { IsSDWA = true;  break; }

      if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {
        Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address);
        if (Res)
          break;
      }

      // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
      // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
      // table first so we print the correct name.
      if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) {
        Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address);
        if (Res)
          break;
      }
    }

    // Reinitialize Bytes as DPP64 could have eaten too much
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try decode 32-bit instruction
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address);
    if (Res) break;

    // Fall back to 64-bit encodings: low dword already in DW, read the high
    // dword (little-endian instruction stream).
    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
  } while (false);

  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi)) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
    Res = convertMIMGInst(MI);
  }

  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);

  // if the opcode was not recognized we'll assume a Size of 4 bytes
  // (unless there are fewer bytes left)
  Size = Res ? (MaxInstBytesNum - Bytes.size())
             : std::min((size_t)4, Bytes_.size());
  return Res;
}

// Post-decode fixup for SDWA instructions: the encodings omit operands that
// the MCInst form expects, so insert them here per subtarget.
DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst (VI SDWA VOPC always writes VCC)
      insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}

// Note that MIMG format provides no information about VADDR size.
// Consequently, decoded instructions always show address
// as if it has 1 dword, which could be not really so.
// Post-decode fixup for MIMG instructions: the encoding does not carry the
// vdata register width, so the generic decode always yields one dword. Pick
// the opcode variant matching the channel count implied by dmask (and d16
// packing) and widen vdata -- and vdst for atomics -- accordingly.
DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {

  // Gather4 always writes 4 channels regardless of dmask; nothing to do.
  if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::Gather4) {
    return MCDisassembler::Success;
  }

  int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdst);

  int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdata);

  int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::dmask);

  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                          AMDGPU::OpName::tfe);

  assert(VDataIdx != -1);
  assert(DMaskIdx != -1);
  assert(TFEIdx != -1);

  // Only atomics have a vdst operand (the returned pre-op value).
  bool IsAtomic = (VDstIdx != -1);

  unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
  if (DMask == 0)
    return MCDisassembler::Success;

  // One register per enabled dmask bit.
  unsigned DstSize = countPopulation(DMask);
  if (DstSize == 1)
    return MCDisassembler::Success;

  // Packed d16 stores two 16-bit channels per 32-bit register.
  bool D16 = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::D16;
  if (D16 && AMDGPU::hasPackedD16(STI)) {
    DstSize = (DstSize + 1) / 2;
  }

  // FIXME: Add tfe support
  if (MI.getOperand(TFEIdx).getImm())
    return MCDisassembler::Success;

  int NewOpcode = -1;

  if (IsAtomic) {
    if (DMask == 0x1 || DMask == 0x3 || DMask == 0xF) {
      NewOpcode = AMDGPU::getMaskedMIMGAtomicOp(*MCII, MI.getOpcode(), DstSize);
    }
    // Other dmask patterns are not valid atomic encodings; leave as decoded.
    if (NewOpcode == -1) return MCDisassembler::Success;
  } else {
    NewOpcode = AMDGPU::getMaskedMIMGOp(*MCII, MI.getOpcode(), DstSize);
    assert(NewOpcode != -1 && "could not find matching mimg channel instruction");
  }

  auto RCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;

  // Get first subregister of VData
  unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
  unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
  Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;

  // Widen the register to the correct number of enabled channels.
  auto NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
                                          &MRI.getRegClass(RCID));
  if (NewVdata == AMDGPU::NoRegister) {
    // It's possible to encode this such that the low register + enabled
    // components exceeds the register count.
    return MCDisassembler::Success;
  }

  MI.setOpcode(NewOpcode);
  // Note: vaddr will always appear as a single VGPR (see comment above this
  // function); only vdata/vdst are rewritten here. This will look different
  // than how it is usually emitted because the number of register components
  // is not in the instruction encoding.
  MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);

  if (IsAtomic) {
    // Atomic operations have an additional operand (a copy of data)
    MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
  }

  return MCDisassembler::Success;
}

// Human-readable name of an AMDGPU MC register class, for diagnostics.
const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

// Emit a decode-error note into the comment stream and return an invalid
// operand (addOperand() will turn it into SoftFail).
inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

// Wrap a target register id, mapped to the subtarget's MC register.
inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
}

// Create a register operand from a class-relative index, range-checked
// against the class size.
inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

// Create a scalar register operand. Scalar encodings index 32-bit lanes, so
// the raw value is shifted down by the tuple width; misaligned values are
// flagged in the comment stream but still decoded.
inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Accept as much as we can here; let the assembler sort it out.
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::TTMP_256RegClassID:
    // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
    // this bundle?
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::TTMP_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

// VS_* operands accept VGPRs, SGPRs, inline constants and literals; defer to
// the generic source-operand decoder at the matching width.
MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // table-gen generated disassembler doesn't care about operand types
  // leaving only registry class so SSrc_32 operand turns into SReg_32
  // and therefore we accept immediates and literals here as well
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0_XEXEC is SReg_32 without M0 or EXEC_LO/EXEC_HI; the decoder
  // cannot distinguish, so share the SReg_32 path.
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
  unsigned Val) const {
  // SReg_32_XEXEC_HI is SReg_32 without EXEC_HI; the decoder cannot
  // distinguish, so share the SReg_32 path.
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

// 256/512-bit scalar operands are destinations only: registers, no
// immediates or literals.
MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return decodeDstOp(OPW256, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return decodeDstOp(OPW512, Val);
}

// Read (once) the 32-bit literal dword that follows the instruction word.
// Cached in HasLiteral/Literal so several operands can reference the same
// trailing literal without consuming extra bytes.
MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are supposed to be unsigned integer
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}

// Map an inline-integer encoding to its value: the low half of the range
// encodes 0..64, the upper half encodes -1..-16.
MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;

  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
      // Cast prevents negative overflow.
}

// Inline FP constant (encodings 240..248) as a 32-bit float bit pattern.
static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

// Inline FP constant as a 64-bit double bit pattern.
static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

// Inline FP constant as a 16-bit half-precision bit pattern.
static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

// Decode an inline floating-point constant at the given operand width.
MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

// Vector register class for an operand width. 16-bit and packed 16-bit
// operands still occupy a full 32-bit VGPR.
unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fallthrough to OPW32
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

// Scalar register class for an operand width.
unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fallthrough to OPW32
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  case OPW256: return SGPR_256RegClassID;
  case OPW512: return SGPR_512RegClassID;
  }
}

// Trap-temp register class for an operand width.
unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fallthrough to OPW32
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  case OPW256: return TTMP_256RegClassID;
  case OPW512: return TTMP_512RegClassID;
  }
}

// Return the ttmp index for encoding \p Val, or -1 if Val is outside the
// subtarget's ttmp encoding range (the range moved between VI and GFX9).
int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
  using namespace AMDGPU::EncValues;

  unsigned TTmpMin = isGFX9() ? TTMP_GFX9_MIN : TTMP_VI_MIN;
  unsigned TTmpMax = isGFX9() ? TTMP_GFX9_MAX : TTMP_VI_MAX;

  return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
}

// Decode a generic 9-bit source operand: VGPR, SGPR, ttmp, inline integer,
// inline float, trailing literal, or a special (hardware) register.
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 512); // enum9

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

// Decode a wide (256/512-bit) scalar destination: only SGPR tuples and ttmp
// tuples are encodable; immediates are impossible for destinations.
MCOperand AMDGPUDisassembler::decodeDstOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 128);
  assert(Width == OPW256 || Width == OPW512);

  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  llvm_unreachable("unknown dst register");
}

// Decode a 32-bit special (hardware) register encoding. TBA/TMA encodings
// were removed on GFX9, hence the asserts.
MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR_LO);
  case 103: return createRegOperand(FLAT_SCR_HI);
  case 104: return createRegOperand(XNACK_MASK_LO);
  case 105: return createRegOperand(XNACK_MASK_HI);
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: assert(!isGFX9()); return createRegOperand(TBA_LO);
  case 109: assert(!isGFX9()); return createRegOperand(TBA_HI);
  case 110: assert(!isGFX9()); return createRegOperand(TMA_LO);
  case 111: assert(!isGFX9()); return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
    // TODO: SRC_POPS_EXITING_WAVE_ID
    // ToDo: no support for vccz register
  case 251: break;
    // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

// Decode a 64-bit special (hardware) register pair encoding.
MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR);
  case 104: return createRegOperand(XNACK_MASK);
  case 106: return createRegOperand(VCC);
  case 108: assert(!isGFX9()); return createRegOperand(TBA);
  case 110: assert(!isGFX9()); return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

// Decode an SDWA source operand. GFX9 uses a wider encoding with dedicated
// VGPR/SGPR/ttmp sub-ranges; VI SDWA sources are always VGPRs.
MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
                                            const unsigned Val) const {
  using namespace AMDGPU::SDWA;
  using namespace AMDGPU::EncValues;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    // XXX: static_cast<int> is needed to avoid stupid warning:
    // compare with unsigned is always true
    if (SDWA9EncValues::SRC_VGPR_MIN <= static_cast<int>(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
      return createRegOperand(getVgprClassId(Width),
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    }
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_SGPR_MAX) {
      return createSRegOperand(getSgprClassId(Width),
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    }
    if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_TTMP_MAX) {
      return createSRegOperand(getTtmpClassId(Width),
                               Val - SDWA9EncValues::SRC_TTMP_MIN);
    }

    // Remaining encodings map onto the generic operand space after removing
    // the SGPR-range bias.
    const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;

    if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
      return decodeIntImmed(SVal);

    if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
      return decodeFPImmed(Width, SVal);

    return decodeSpecialReg32(SVal);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    return createRegOperand(getVgprClassId(Width), Val);
  }
  llvm_unreachable("unsupported target");
}

MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}

// Decode the GFX9 SDWA VOPC destination: either VCC (implicit), or an
// explicit SGPR pair / ttmp pair / special 64-bit register.
MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
  using namespace AMDGPU::SDWA;

  assert(STI.getFeatureBits()[AMDGPU::FeatureGFX9] &&
         "SDWAVopcDst should be present only on GFX9");
  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;

    int TTmpIdx = getTTmpIdx(Val);
    if (TTmpIdx >= 0) {
      return createSRegOperand(getTtmpClassId(OPW64), TTmpIdx);
    } else if (Val > AMDGPU::EncValues::SGPR_MAX) {
      return decodeSpecialReg64(Val);
    } else {
      return createSRegOperand(getSgprClassId(OPW64), Val);
    }
  } else {
    return createRegOperand(AMDGPU::VCC);
  }
}

bool AMDGPUDisassembler::isVI() const {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool AMDGPUDisassembler::isGFX9() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find symbol name for specified label
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  // DisInfo is a SectionSymbolsTy of (address, name, ELF type) tuples,
  // provided by the client through the symbolizer's opaque pointer.
  using SymbolInfoTy = std::tuple<uint64_t, StringRef, uint8_t>;
  using SectionSymbolsTy = std::vector<SymbolInfoTy>;

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  if (!Symbols)
    return false;

  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                               return std::get<0>(Val) == static_cast<uint64_t>(Value)
                                   && std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

// Factory hooked into the target registry below.
static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

// Factory hooked into the target registry below.
static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
}

extern "C" void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}