//===-- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains definition for AMDGPU ISA disassembler
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?
19e1818af8STom Stellard 20e1818af8STom Stellard #include "AMDGPUDisassembler.h" 21e1818af8STom Stellard #include "AMDGPU.h" 22e1818af8STom Stellard #include "AMDGPURegisterInfo.h" 236bda14b3SChandler Carruth #include "MCTargetDesc/AMDGPUMCTargetDesc.h" 24212a251cSArtem Tamazov #include "SIDefines.h" 25e1818af8STom Stellard #include "Utils/AMDGPUBaseInfo.h" 26e1818af8STom Stellard 27264b5d9eSZachary Turner #include "llvm/BinaryFormat/ELF.h" 28ac106addSNikolay Haustov #include "llvm/MC/MCContext.h" 29e1818af8STom Stellard #include "llvm/MC/MCFixedLenDisassembler.h" 30e1818af8STom Stellard #include "llvm/MC/MCInst.h" 31e1818af8STom Stellard #include "llvm/MC/MCInstrDesc.h" 32e1818af8STom Stellard #include "llvm/MC/MCSubtargetInfo.h" 336bda14b3SChandler Carruth #include "llvm/Support/Debug.h" 34ac106addSNikolay Haustov #include "llvm/Support/Endian.h" 35e1818af8STom Stellard #include "llvm/Support/TargetRegistry.h" 36e1818af8STom Stellard 37e1818af8STom Stellard using namespace llvm; 38e1818af8STom Stellard 39e1818af8STom Stellard #define DEBUG_TYPE "amdgpu-disassembler" 40e1818af8STom Stellard 41e1818af8STom Stellard typedef llvm::MCDisassembler::DecodeStatus DecodeStatus; 42e1818af8STom Stellard 43e1818af8STom Stellard 44ac106addSNikolay Haustov inline static MCDisassembler::DecodeStatus 45ac106addSNikolay Haustov addOperand(MCInst &Inst, const MCOperand& Opnd) { 46ac106addSNikolay Haustov Inst.addOperand(Opnd); 47ac106addSNikolay Haustov return Opnd.isValid() ? 
48ac106addSNikolay Haustov MCDisassembler::Success : 49ac106addSNikolay Haustov MCDisassembler::SoftFail; 50e1818af8STom Stellard } 51e1818af8STom Stellard 52549c89d2SSam Kolton static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op, 53549c89d2SSam Kolton uint16_t NameIdx) { 54549c89d2SSam Kolton int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx); 55549c89d2SSam Kolton if (OpIdx != -1) { 56549c89d2SSam Kolton auto I = MI.begin(); 57549c89d2SSam Kolton std::advance(I, OpIdx); 58549c89d2SSam Kolton MI.insert(I, Op); 59549c89d2SSam Kolton } 60549c89d2SSam Kolton return OpIdx; 61549c89d2SSam Kolton } 62549c89d2SSam Kolton 633381d7a2SSam Kolton static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm, 643381d7a2SSam Kolton uint64_t Addr, const void *Decoder) { 653381d7a2SSam Kolton auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); 663381d7a2SSam Kolton 673381d7a2SSam Kolton APInt SignedOffset(18, Imm * 4, true); 683381d7a2SSam Kolton int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue(); 693381d7a2SSam Kolton 703381d7a2SSam Kolton if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2)) 713381d7a2SSam Kolton return MCDisassembler::Success; 723381d7a2SSam Kolton return addOperand(Inst, MCOperand::createImm(Imm)); 733381d7a2SSam Kolton } 743381d7a2SSam Kolton 75363f47a2SSam Kolton #define DECODE_OPERAND(StaticDecoderName, DecoderName) \ 76363f47a2SSam Kolton static DecodeStatus StaticDecoderName(MCInst &Inst, \ 77ac106addSNikolay Haustov unsigned Imm, \ 78ac106addSNikolay Haustov uint64_t /*Addr*/, \ 79ac106addSNikolay Haustov const void *Decoder) { \ 80ac106addSNikolay Haustov auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \ 81363f47a2SSam Kolton return addOperand(Inst, DAsm->DecoderName(Imm)); \ 82e1818af8STom Stellard } 83e1818af8STom Stellard 84363f47a2SSam Kolton #define DECODE_OPERAND_REG(RegClass) \ 85363f47a2SSam Kolton DECODE_OPERAND(Decode##RegClass##RegisterClass, 
decodeOperand_##RegClass) 86e1818af8STom Stellard 87363f47a2SSam Kolton DECODE_OPERAND_REG(VGPR_32) 88363f47a2SSam Kolton DECODE_OPERAND_REG(VS_32) 89363f47a2SSam Kolton DECODE_OPERAND_REG(VS_64) 9030fc5239SDmitry Preobrazhensky DECODE_OPERAND_REG(VS_128) 91e1818af8STom Stellard 92363f47a2SSam Kolton DECODE_OPERAND_REG(VReg_64) 93363f47a2SSam Kolton DECODE_OPERAND_REG(VReg_96) 94363f47a2SSam Kolton DECODE_OPERAND_REG(VReg_128) 95e1818af8STom Stellard 96363f47a2SSam Kolton DECODE_OPERAND_REG(SReg_32) 97363f47a2SSam Kolton DECODE_OPERAND_REG(SReg_32_XM0_XEXEC) 98*ca7b0a17SMatt Arsenault DECODE_OPERAND_REG(SReg_32_XEXEC_HI) 99363f47a2SSam Kolton DECODE_OPERAND_REG(SReg_64) 100363f47a2SSam Kolton DECODE_OPERAND_REG(SReg_64_XEXEC) 101363f47a2SSam Kolton DECODE_OPERAND_REG(SReg_128) 102363f47a2SSam Kolton DECODE_OPERAND_REG(SReg_256) 103363f47a2SSam Kolton DECODE_OPERAND_REG(SReg_512) 104e1818af8STom Stellard 1054bd72361SMatt Arsenault 1064bd72361SMatt Arsenault static DecodeStatus decodeOperand_VSrc16(MCInst &Inst, 1074bd72361SMatt Arsenault unsigned Imm, 1084bd72361SMatt Arsenault uint64_t Addr, 1094bd72361SMatt Arsenault const void *Decoder) { 1104bd72361SMatt Arsenault auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); 1114bd72361SMatt Arsenault return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm)); 1124bd72361SMatt Arsenault } 1134bd72361SMatt Arsenault 1149be7b0d4SMatt Arsenault static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst, 1159be7b0d4SMatt Arsenault unsigned Imm, 1169be7b0d4SMatt Arsenault uint64_t Addr, 1179be7b0d4SMatt Arsenault const void *Decoder) { 1189be7b0d4SMatt Arsenault auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); 1199be7b0d4SMatt Arsenault return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm)); 1209be7b0d4SMatt Arsenault } 1219be7b0d4SMatt Arsenault 122549c89d2SSam Kolton #define DECODE_SDWA(DecName) \ 123549c89d2SSam Kolton DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName) 124363f47a2SSam Kolton 
125549c89d2SSam Kolton DECODE_SDWA(Src32) 126549c89d2SSam Kolton DECODE_SDWA(Src16) 127549c89d2SSam Kolton DECODE_SDWA(VopcDst) 128363f47a2SSam Kolton 129e1818af8STom Stellard #include "AMDGPUGenDisassemblerTables.inc" 130e1818af8STom Stellard 131e1818af8STom Stellard //===----------------------------------------------------------------------===// 132e1818af8STom Stellard // 133e1818af8STom Stellard //===----------------------------------------------------------------------===// 134e1818af8STom Stellard 1351048fb18SSam Kolton template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) { 1361048fb18SSam Kolton assert(Bytes.size() >= sizeof(T)); 1371048fb18SSam Kolton const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data()); 1381048fb18SSam Kolton Bytes = Bytes.slice(sizeof(T)); 139ac106addSNikolay Haustov return Res; 140ac106addSNikolay Haustov } 141ac106addSNikolay Haustov 142ac106addSNikolay Haustov DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table, 143ac106addSNikolay Haustov MCInst &MI, 144ac106addSNikolay Haustov uint64_t Inst, 145ac106addSNikolay Haustov uint64_t Address) const { 146ac106addSNikolay Haustov assert(MI.getOpcode() == 0); 147ac106addSNikolay Haustov assert(MI.getNumOperands() == 0); 148ac106addSNikolay Haustov MCInst TmpInst; 149ce941c9cSDmitry Preobrazhensky HasLiteral = false; 150ac106addSNikolay Haustov const auto SavedBytes = Bytes; 151ac106addSNikolay Haustov if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) { 152ac106addSNikolay Haustov MI = TmpInst; 153ac106addSNikolay Haustov return MCDisassembler::Success; 154ac106addSNikolay Haustov } 155ac106addSNikolay Haustov Bytes = SavedBytes; 156ac106addSNikolay Haustov return MCDisassembler::Fail; 157ac106addSNikolay Haustov } 158ac106addSNikolay Haustov 159e1818af8STom Stellard DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size, 160ac106addSNikolay Haustov ArrayRef<uint8_t> Bytes_, 161e1818af8STom 
Stellard uint64_t Address, 162e1818af8STom Stellard raw_ostream &WS, 163e1818af8STom Stellard raw_ostream &CS) const { 164e1818af8STom Stellard CommentStream = &CS; 165549c89d2SSam Kolton bool IsSDWA = false; 166e1818af8STom Stellard 167e1818af8STom Stellard // ToDo: AMDGPUDisassembler supports only VI ISA. 168d122abeaSMatt Arsenault if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding]) 169d122abeaSMatt Arsenault report_fatal_error("Disassembly not yet supported for subtarget"); 170e1818af8STom Stellard 171ac106addSNikolay Haustov const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size()); 172ac106addSNikolay Haustov Bytes = Bytes_.slice(0, MaxInstBytesNum); 173161a158eSNikolay Haustov 174ac106addSNikolay Haustov DecodeStatus Res = MCDisassembler::Fail; 175ac106addSNikolay Haustov do { 176824e804bSValery Pykhtin // ToDo: better to switch encoding length using some bit predicate 177ac106addSNikolay Haustov // but it is unknown yet, so try all we can 1781048fb18SSam Kolton 179c9bdcb75SSam Kolton // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2 180c9bdcb75SSam Kolton // encodings 1811048fb18SSam Kolton if (Bytes.size() >= 8) { 1821048fb18SSam Kolton const uint64_t QW = eatBytes<uint64_t>(Bytes); 1831048fb18SSam Kolton Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address); 1841048fb18SSam Kolton if (Res) break; 185c9bdcb75SSam Kolton 186c9bdcb75SSam Kolton Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address); 187549c89d2SSam Kolton if (Res) { IsSDWA = true; break; } 188363f47a2SSam Kolton 189363f47a2SSam Kolton Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address); 190549c89d2SSam Kolton if (Res) { IsSDWA = true; break; } 1911048fb18SSam Kolton } 1921048fb18SSam Kolton 1931048fb18SSam Kolton // Reinitialize Bytes as DPP64 could have eaten too much 1941048fb18SSam Kolton Bytes = Bytes_.slice(0, MaxInstBytesNum); 1951048fb18SSam Kolton 1961048fb18SSam Kolton // Try decode 32-bit instruction 197ac106addSNikolay Haustov if 
(Bytes.size() < 4) break; 1981048fb18SSam Kolton const uint32_t DW = eatBytes<uint32_t>(Bytes); 199ac106addSNikolay Haustov Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address); 200ac106addSNikolay Haustov if (Res) break; 201e1818af8STom Stellard 202ac106addSNikolay Haustov Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address); 203ac106addSNikolay Haustov if (Res) break; 204ac106addSNikolay Haustov 205ac106addSNikolay Haustov if (Bytes.size() < 4) break; 2061048fb18SSam Kolton const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW; 207ac106addSNikolay Haustov Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address); 208ac106addSNikolay Haustov if (Res) break; 209ac106addSNikolay Haustov 210ac106addSNikolay Haustov Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address); 211ac106addSNikolay Haustov } while (false); 212ac106addSNikolay Haustov 213678e111eSMatt Arsenault if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi || 214678e111eSMatt Arsenault MI.getOpcode() == AMDGPU::V_MAC_F32_e64_si || 215678e111eSMatt Arsenault MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi)) { 216678e111eSMatt Arsenault // Insert dummy unused src2_modifiers. 217549c89d2SSam Kolton insertNamedMCOperand(MI, MCOperand::createImm(0), 218678e111eSMatt Arsenault AMDGPU::OpName::src2_modifiers); 219678e111eSMatt Arsenault } 220678e111eSMatt Arsenault 221549c89d2SSam Kolton if (Res && IsSDWA) 222549c89d2SSam Kolton Res = convertSDWAInst(MI); 223549c89d2SSam Kolton 224ac106addSNikolay Haustov Size = Res ? 
(MaxInstBytesNum - Bytes.size()) : 0; 225ac106addSNikolay Haustov return Res; 226161a158eSNikolay Haustov } 227e1818af8STom Stellard 228549c89d2SSam Kolton DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const { 229549c89d2SSam Kolton if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) { 230549c89d2SSam Kolton if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1) 231549c89d2SSam Kolton // VOPC - insert clamp 232549c89d2SSam Kolton insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp); 233549c89d2SSam Kolton } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) { 234549c89d2SSam Kolton int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst); 235549c89d2SSam Kolton if (SDst != -1) { 236549c89d2SSam Kolton // VOPC - insert VCC register as sdst 237549c89d2SSam Kolton insertNamedMCOperand(MI, MCOperand::createReg(AMDGPU::VCC), 238549c89d2SSam Kolton AMDGPU::OpName::sdst); 239549c89d2SSam Kolton } else { 240549c89d2SSam Kolton // VOP1/2 - insert omod if present in instruction 241549c89d2SSam Kolton insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod); 242549c89d2SSam Kolton } 243549c89d2SSam Kolton } 244549c89d2SSam Kolton return MCDisassembler::Success; 245549c89d2SSam Kolton } 246549c89d2SSam Kolton 247ac106addSNikolay Haustov const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const { 248ac106addSNikolay Haustov return getContext().getRegisterInfo()-> 249ac106addSNikolay Haustov getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]); 250e1818af8STom Stellard } 251e1818af8STom Stellard 252ac106addSNikolay Haustov inline 253ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::errOperand(unsigned V, 254ac106addSNikolay Haustov const Twine& ErrMsg) const { 255ac106addSNikolay Haustov *CommentStream << "Error: " + ErrMsg; 256ac106addSNikolay Haustov 257ac106addSNikolay Haustov // ToDo: add support for error operands to MCInst.h 258ac106addSNikolay Haustov // 
return MCOperand::createError(V); 259ac106addSNikolay Haustov return MCOperand(); 260ac106addSNikolay Haustov } 261ac106addSNikolay Haustov 262ac106addSNikolay Haustov inline 263ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const { 264ac106addSNikolay Haustov return MCOperand::createReg(RegId); 265ac106addSNikolay Haustov } 266ac106addSNikolay Haustov 267ac106addSNikolay Haustov inline 268ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID, 269ac106addSNikolay Haustov unsigned Val) const { 270ac106addSNikolay Haustov const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID]; 271ac106addSNikolay Haustov if (Val >= RegCl.getNumRegs()) 272ac106addSNikolay Haustov return errOperand(Val, Twine(getRegClassName(RegClassID)) + 273ac106addSNikolay Haustov ": unknown register " + Twine(Val)); 274ac106addSNikolay Haustov return createRegOperand(RegCl.getRegister(Val)); 275ac106addSNikolay Haustov } 276ac106addSNikolay Haustov 277ac106addSNikolay Haustov inline 278ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID, 279ac106addSNikolay Haustov unsigned Val) const { 280ac106addSNikolay Haustov // ToDo: SI/CI have 104 SGPRs, VI - 102 281ac106addSNikolay Haustov // Valery: here we accepting as much as we can, let assembler sort it out 282ac106addSNikolay Haustov int shift = 0; 283ac106addSNikolay Haustov switch (SRegClassID) { 284ac106addSNikolay Haustov case AMDGPU::SGPR_32RegClassID: 285212a251cSArtem Tamazov case AMDGPU::TTMP_32RegClassID: 286212a251cSArtem Tamazov break; 287ac106addSNikolay Haustov case AMDGPU::SGPR_64RegClassID: 288212a251cSArtem Tamazov case AMDGPU::TTMP_64RegClassID: 289212a251cSArtem Tamazov shift = 1; 290212a251cSArtem Tamazov break; 291212a251cSArtem Tamazov case AMDGPU::SGPR_128RegClassID: 292212a251cSArtem Tamazov case AMDGPU::TTMP_128RegClassID: 293ac106addSNikolay Haustov // ToDo: unclear if s[100:104] is available on VI. 
Can we use VCC as SGPR in 294ac106addSNikolay Haustov // this bundle? 295ac106addSNikolay Haustov case AMDGPU::SReg_256RegClassID: 296ac106addSNikolay Haustov // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in 297ac106addSNikolay Haustov // this bundle? 298212a251cSArtem Tamazov case AMDGPU::SReg_512RegClassID: 299212a251cSArtem Tamazov shift = 2; 300212a251cSArtem Tamazov break; 301ac106addSNikolay Haustov // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in 302ac106addSNikolay Haustov // this bundle? 303212a251cSArtem Tamazov default: 30492b355b1SMatt Arsenault llvm_unreachable("unhandled register class"); 305ac106addSNikolay Haustov } 30692b355b1SMatt Arsenault 30792b355b1SMatt Arsenault if (Val % (1 << shift)) { 308ac106addSNikolay Haustov *CommentStream << "Warning: " << getRegClassName(SRegClassID) 309ac106addSNikolay Haustov << ": scalar reg isn't aligned " << Val; 31092b355b1SMatt Arsenault } 31192b355b1SMatt Arsenault 312ac106addSNikolay Haustov return createRegOperand(SRegClassID, Val >> shift); 313ac106addSNikolay Haustov } 314ac106addSNikolay Haustov 315ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const { 316212a251cSArtem Tamazov return decodeSrcOp(OPW32, Val); 317ac106addSNikolay Haustov } 318ac106addSNikolay Haustov 319ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const { 320212a251cSArtem Tamazov return decodeSrcOp(OPW64, Val); 321ac106addSNikolay Haustov } 322ac106addSNikolay Haustov 32330fc5239SDmitry Preobrazhensky MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const { 32430fc5239SDmitry Preobrazhensky return decodeSrcOp(OPW128, Val); 32530fc5239SDmitry Preobrazhensky } 32630fc5239SDmitry Preobrazhensky 3274bd72361SMatt Arsenault MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const { 3284bd72361SMatt Arsenault return decodeSrcOp(OPW16, Val); 3294bd72361SMatt Arsenault } 
3304bd72361SMatt Arsenault 3319be7b0d4SMatt Arsenault MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const { 3329be7b0d4SMatt Arsenault return decodeSrcOp(OPWV216, Val); 3339be7b0d4SMatt Arsenault } 3349be7b0d4SMatt Arsenault 335ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const { 336cb540bc0SMatt Arsenault // Some instructions have operand restrictions beyond what the encoding 337cb540bc0SMatt Arsenault // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra 338cb540bc0SMatt Arsenault // high bit. 339cb540bc0SMatt Arsenault Val &= 255; 340cb540bc0SMatt Arsenault 341ac106addSNikolay Haustov return createRegOperand(AMDGPU::VGPR_32RegClassID, Val); 342ac106addSNikolay Haustov } 343ac106addSNikolay Haustov 344ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const { 345ac106addSNikolay Haustov return createRegOperand(AMDGPU::VReg_64RegClassID, Val); 346ac106addSNikolay Haustov } 347ac106addSNikolay Haustov 348ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const { 349ac106addSNikolay Haustov return createRegOperand(AMDGPU::VReg_96RegClassID, Val); 350ac106addSNikolay Haustov } 351ac106addSNikolay Haustov 352ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const { 353ac106addSNikolay Haustov return createRegOperand(AMDGPU::VReg_128RegClassID, Val); 354ac106addSNikolay Haustov } 355ac106addSNikolay Haustov 356ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const { 357ac106addSNikolay Haustov // table-gen generated disassembler doesn't care about operand types 358ac106addSNikolay Haustov // leaving only registry class so SSrc_32 operand turns into SReg_32 359ac106addSNikolay Haustov // and therefore we accept immediates and literals here as well 360212a251cSArtem Tamazov return decodeSrcOp(OPW32, Val); 
361ac106addSNikolay Haustov } 362ac106addSNikolay Haustov 363640c44b8SMatt Arsenault MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC( 364640c44b8SMatt Arsenault unsigned Val) const { 365640c44b8SMatt Arsenault // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI 36638e496b1SArtem Tamazov return decodeOperand_SReg_32(Val); 36738e496b1SArtem Tamazov } 36838e496b1SArtem Tamazov 369*ca7b0a17SMatt Arsenault MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI( 370*ca7b0a17SMatt Arsenault unsigned Val) const { 371*ca7b0a17SMatt Arsenault // SReg_32_XM0 is SReg_32 without EXEC_HI 372*ca7b0a17SMatt Arsenault return decodeOperand_SReg_32(Val); 373*ca7b0a17SMatt Arsenault } 374*ca7b0a17SMatt Arsenault 375ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const { 376640c44b8SMatt Arsenault return decodeSrcOp(OPW64, Val); 377640c44b8SMatt Arsenault } 378640c44b8SMatt Arsenault 379640c44b8SMatt Arsenault MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const { 380212a251cSArtem Tamazov return decodeSrcOp(OPW64, Val); 381ac106addSNikolay Haustov } 382ac106addSNikolay Haustov 383ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const { 384212a251cSArtem Tamazov return decodeSrcOp(OPW128, Val); 385ac106addSNikolay Haustov } 386ac106addSNikolay Haustov 387ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const { 388ac106addSNikolay Haustov return createSRegOperand(AMDGPU::SReg_256RegClassID, Val); 389ac106addSNikolay Haustov } 390ac106addSNikolay Haustov 391ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const { 392ac106addSNikolay Haustov return createSRegOperand(AMDGPU::SReg_512RegClassID, Val); 393ac106addSNikolay Haustov } 394ac106addSNikolay Haustov 395ac106addSNikolay Haustov 396ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeLiteralConstant() const { 
397ac106addSNikolay Haustov // For now all literal constants are supposed to be unsigned integer 398ac106addSNikolay Haustov // ToDo: deal with signed/unsigned 64-bit integer constants 399ac106addSNikolay Haustov // ToDo: deal with float/double constants 400ce941c9cSDmitry Preobrazhensky if (!HasLiteral) { 401ce941c9cSDmitry Preobrazhensky if (Bytes.size() < 4) { 402ac106addSNikolay Haustov return errOperand(0, "cannot read literal, inst bytes left " + 403ac106addSNikolay Haustov Twine(Bytes.size())); 404ce941c9cSDmitry Preobrazhensky } 405ce941c9cSDmitry Preobrazhensky HasLiteral = true; 406ce941c9cSDmitry Preobrazhensky Literal = eatBytes<uint32_t>(Bytes); 407ce941c9cSDmitry Preobrazhensky } 408ce941c9cSDmitry Preobrazhensky return MCOperand::createImm(Literal); 409ac106addSNikolay Haustov } 410ac106addSNikolay Haustov 411ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) { 412212a251cSArtem Tamazov using namespace AMDGPU::EncValues; 413212a251cSArtem Tamazov assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX); 414212a251cSArtem Tamazov return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ? 415212a251cSArtem Tamazov (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) : 416212a251cSArtem Tamazov (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm))); 417212a251cSArtem Tamazov // Cast prevents negative overflow. 
418ac106addSNikolay Haustov } 419ac106addSNikolay Haustov 4204bd72361SMatt Arsenault static int64_t getInlineImmVal32(unsigned Imm) { 4214bd72361SMatt Arsenault switch (Imm) { 4224bd72361SMatt Arsenault case 240: 4234bd72361SMatt Arsenault return FloatToBits(0.5f); 4244bd72361SMatt Arsenault case 241: 4254bd72361SMatt Arsenault return FloatToBits(-0.5f); 4264bd72361SMatt Arsenault case 242: 4274bd72361SMatt Arsenault return FloatToBits(1.0f); 4284bd72361SMatt Arsenault case 243: 4294bd72361SMatt Arsenault return FloatToBits(-1.0f); 4304bd72361SMatt Arsenault case 244: 4314bd72361SMatt Arsenault return FloatToBits(2.0f); 4324bd72361SMatt Arsenault case 245: 4334bd72361SMatt Arsenault return FloatToBits(-2.0f); 4344bd72361SMatt Arsenault case 246: 4354bd72361SMatt Arsenault return FloatToBits(4.0f); 4364bd72361SMatt Arsenault case 247: 4374bd72361SMatt Arsenault return FloatToBits(-4.0f); 4384bd72361SMatt Arsenault case 248: // 1 / (2 * PI) 4394bd72361SMatt Arsenault return 0x3e22f983; 4404bd72361SMatt Arsenault default: 4414bd72361SMatt Arsenault llvm_unreachable("invalid fp inline imm"); 4424bd72361SMatt Arsenault } 4434bd72361SMatt Arsenault } 4444bd72361SMatt Arsenault 4454bd72361SMatt Arsenault static int64_t getInlineImmVal64(unsigned Imm) { 4464bd72361SMatt Arsenault switch (Imm) { 4474bd72361SMatt Arsenault case 240: 4484bd72361SMatt Arsenault return DoubleToBits(0.5); 4494bd72361SMatt Arsenault case 241: 4504bd72361SMatt Arsenault return DoubleToBits(-0.5); 4514bd72361SMatt Arsenault case 242: 4524bd72361SMatt Arsenault return DoubleToBits(1.0); 4534bd72361SMatt Arsenault case 243: 4544bd72361SMatt Arsenault return DoubleToBits(-1.0); 4554bd72361SMatt Arsenault case 244: 4564bd72361SMatt Arsenault return DoubleToBits(2.0); 4574bd72361SMatt Arsenault case 245: 4584bd72361SMatt Arsenault return DoubleToBits(-2.0); 4594bd72361SMatt Arsenault case 246: 4604bd72361SMatt Arsenault return DoubleToBits(4.0); 4614bd72361SMatt Arsenault case 247: 4624bd72361SMatt 
Arsenault return DoubleToBits(-4.0); 4634bd72361SMatt Arsenault case 248: // 1 / (2 * PI) 4644bd72361SMatt Arsenault return 0x3fc45f306dc9c882; 4654bd72361SMatt Arsenault default: 4664bd72361SMatt Arsenault llvm_unreachable("invalid fp inline imm"); 4674bd72361SMatt Arsenault } 4684bd72361SMatt Arsenault } 4694bd72361SMatt Arsenault 4704bd72361SMatt Arsenault static int64_t getInlineImmVal16(unsigned Imm) { 4714bd72361SMatt Arsenault switch (Imm) { 4724bd72361SMatt Arsenault case 240: 4734bd72361SMatt Arsenault return 0x3800; 4744bd72361SMatt Arsenault case 241: 4754bd72361SMatt Arsenault return 0xB800; 4764bd72361SMatt Arsenault case 242: 4774bd72361SMatt Arsenault return 0x3C00; 4784bd72361SMatt Arsenault case 243: 4794bd72361SMatt Arsenault return 0xBC00; 4804bd72361SMatt Arsenault case 244: 4814bd72361SMatt Arsenault return 0x4000; 4824bd72361SMatt Arsenault case 245: 4834bd72361SMatt Arsenault return 0xC000; 4844bd72361SMatt Arsenault case 246: 4854bd72361SMatt Arsenault return 0x4400; 4864bd72361SMatt Arsenault case 247: 4874bd72361SMatt Arsenault return 0xC400; 4884bd72361SMatt Arsenault case 248: // 1 / (2 * PI) 4894bd72361SMatt Arsenault return 0x3118; 4904bd72361SMatt Arsenault default: 4914bd72361SMatt Arsenault llvm_unreachable("invalid fp inline imm"); 4924bd72361SMatt Arsenault } 4934bd72361SMatt Arsenault } 4944bd72361SMatt Arsenault 4954bd72361SMatt Arsenault MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) { 496212a251cSArtem Tamazov assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN 497212a251cSArtem Tamazov && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX); 4984bd72361SMatt Arsenault 499e1818af8STom Stellard // ToDo: case 248: 1/(2*PI) - is allowed only on VI 5004bd72361SMatt Arsenault switch (Width) { 5014bd72361SMatt Arsenault case OPW32: 5024bd72361SMatt Arsenault return MCOperand::createImm(getInlineImmVal32(Imm)); 5034bd72361SMatt Arsenault case OPW64: 5044bd72361SMatt Arsenault return 
MCOperand::createImm(getInlineImmVal64(Imm)); 5054bd72361SMatt Arsenault case OPW16: 5069be7b0d4SMatt Arsenault case OPWV216: 5074bd72361SMatt Arsenault return MCOperand::createImm(getInlineImmVal16(Imm)); 5084bd72361SMatt Arsenault default: 5094bd72361SMatt Arsenault llvm_unreachable("implement me"); 510e1818af8STom Stellard } 511e1818af8STom Stellard } 512e1818af8STom Stellard 513212a251cSArtem Tamazov unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const { 514e1818af8STom Stellard using namespace AMDGPU; 515212a251cSArtem Tamazov assert(OPW_FIRST_ <= Width && Width < OPW_LAST_); 516212a251cSArtem Tamazov switch (Width) { 517212a251cSArtem Tamazov default: // fall 5184bd72361SMatt Arsenault case OPW32: 5194bd72361SMatt Arsenault case OPW16: 5209be7b0d4SMatt Arsenault case OPWV216: 5214bd72361SMatt Arsenault return VGPR_32RegClassID; 522212a251cSArtem Tamazov case OPW64: return VReg_64RegClassID; 523212a251cSArtem Tamazov case OPW128: return VReg_128RegClassID; 524212a251cSArtem Tamazov } 525212a251cSArtem Tamazov } 526212a251cSArtem Tamazov 527212a251cSArtem Tamazov unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const { 528212a251cSArtem Tamazov using namespace AMDGPU; 529212a251cSArtem Tamazov assert(OPW_FIRST_ <= Width && Width < OPW_LAST_); 530212a251cSArtem Tamazov switch (Width) { 531212a251cSArtem Tamazov default: // fall 5324bd72361SMatt Arsenault case OPW32: 5334bd72361SMatt Arsenault case OPW16: 5349be7b0d4SMatt Arsenault case OPWV216: 5354bd72361SMatt Arsenault return SGPR_32RegClassID; 536212a251cSArtem Tamazov case OPW64: return SGPR_64RegClassID; 537212a251cSArtem Tamazov case OPW128: return SGPR_128RegClassID; 538212a251cSArtem Tamazov } 539212a251cSArtem Tamazov } 540212a251cSArtem Tamazov 541212a251cSArtem Tamazov unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const { 542212a251cSArtem Tamazov using namespace AMDGPU; 543212a251cSArtem Tamazov assert(OPW_FIRST_ <= Width && Width < 
OPW_LAST_); 544212a251cSArtem Tamazov switch (Width) { 545212a251cSArtem Tamazov default: // fall 5464bd72361SMatt Arsenault case OPW32: 5474bd72361SMatt Arsenault case OPW16: 5489be7b0d4SMatt Arsenault case OPWV216: 5494bd72361SMatt Arsenault return TTMP_32RegClassID; 550212a251cSArtem Tamazov case OPW64: return TTMP_64RegClassID; 551212a251cSArtem Tamazov case OPW128: return TTMP_128RegClassID; 552212a251cSArtem Tamazov } 553212a251cSArtem Tamazov } 554212a251cSArtem Tamazov 555212a251cSArtem Tamazov MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const { 556212a251cSArtem Tamazov using namespace AMDGPU::EncValues; 557ac106addSNikolay Haustov assert(Val < 512); // enum9 558ac106addSNikolay Haustov 559212a251cSArtem Tamazov if (VGPR_MIN <= Val && Val <= VGPR_MAX) { 560212a251cSArtem Tamazov return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN); 561212a251cSArtem Tamazov } 562b49c3361SArtem Tamazov if (Val <= SGPR_MAX) { 563b49c3361SArtem Tamazov assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning. 
564212a251cSArtem Tamazov return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN); 565212a251cSArtem Tamazov } 566212a251cSArtem Tamazov if (TTMP_MIN <= Val && Val <= TTMP_MAX) { 567212a251cSArtem Tamazov return createSRegOperand(getTtmpClassId(Width), Val - TTMP_MIN); 568212a251cSArtem Tamazov } 569ac106addSNikolay Haustov 570212a251cSArtem Tamazov if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX) 571ac106addSNikolay Haustov return decodeIntImmed(Val); 572ac106addSNikolay Haustov 573212a251cSArtem Tamazov if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX) 5744bd72361SMatt Arsenault return decodeFPImmed(Width, Val); 575ac106addSNikolay Haustov 576212a251cSArtem Tamazov if (Val == LITERAL_CONST) 577ac106addSNikolay Haustov return decodeLiteralConstant(); 578ac106addSNikolay Haustov 5794bd72361SMatt Arsenault switch (Width) { 5804bd72361SMatt Arsenault case OPW32: 5814bd72361SMatt Arsenault case OPW16: 5829be7b0d4SMatt Arsenault case OPWV216: 5834bd72361SMatt Arsenault return decodeSpecialReg32(Val); 5844bd72361SMatt Arsenault case OPW64: 5854bd72361SMatt Arsenault return decodeSpecialReg64(Val); 5864bd72361SMatt Arsenault default: 5874bd72361SMatt Arsenault llvm_unreachable("unexpected immediate type"); 5884bd72361SMatt Arsenault } 589ac106addSNikolay Haustov } 590ac106addSNikolay Haustov 591ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const { 592ac106addSNikolay Haustov using namespace AMDGPU; 593e1818af8STom Stellard switch (Val) { 594ac106addSNikolay Haustov case 102: return createRegOperand(getMCReg(FLAT_SCR_LO, STI)); 595ac106addSNikolay Haustov case 103: return createRegOperand(getMCReg(FLAT_SCR_HI, STI)); 596e1818af8STom Stellard // ToDo: no support for xnack_mask_lo/_hi register 597e1818af8STom Stellard case 104: 598ac106addSNikolay Haustov case 105: break; 599ac106addSNikolay Haustov case 106: return createRegOperand(VCC_LO); 600ac106addSNikolay Haustov case 107: return 
createRegOperand(VCC_HI); 601212a251cSArtem Tamazov case 108: return createRegOperand(TBA_LO); 602212a251cSArtem Tamazov case 109: return createRegOperand(TBA_HI); 603212a251cSArtem Tamazov case 110: return createRegOperand(TMA_LO); 604212a251cSArtem Tamazov case 111: return createRegOperand(TMA_HI); 605ac106addSNikolay Haustov case 124: return createRegOperand(M0); 606ac106addSNikolay Haustov case 126: return createRegOperand(EXEC_LO); 607ac106addSNikolay Haustov case 127: return createRegOperand(EXEC_HI); 608a3b3b489SMatt Arsenault case 235: return createRegOperand(SRC_SHARED_BASE); 609a3b3b489SMatt Arsenault case 236: return createRegOperand(SRC_SHARED_LIMIT); 610a3b3b489SMatt Arsenault case 237: return createRegOperand(SRC_PRIVATE_BASE); 611a3b3b489SMatt Arsenault case 238: return createRegOperand(SRC_PRIVATE_LIMIT); 612a3b3b489SMatt Arsenault // TODO: SRC_POPS_EXITING_WAVE_ID 613e1818af8STom Stellard // ToDo: no support for vccz register 614ac106addSNikolay Haustov case 251: break; 615e1818af8STom Stellard // ToDo: no support for execz register 616ac106addSNikolay Haustov case 252: break; 617ac106addSNikolay Haustov case 253: return createRegOperand(SCC); 618ac106addSNikolay Haustov default: break; 619e1818af8STom Stellard } 620ac106addSNikolay Haustov return errOperand(Val, "unknown operand encoding " + Twine(Val)); 621e1818af8STom Stellard } 622e1818af8STom Stellard 623ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const { 624161a158eSNikolay Haustov using namespace AMDGPU; 625161a158eSNikolay Haustov switch (Val) { 626ac106addSNikolay Haustov case 102: return createRegOperand(getMCReg(FLAT_SCR, STI)); 627ac106addSNikolay Haustov case 106: return createRegOperand(VCC); 628212a251cSArtem Tamazov case 108: return createRegOperand(TBA); 629212a251cSArtem Tamazov case 110: return createRegOperand(TMA); 630ac106addSNikolay Haustov case 126: return createRegOperand(EXEC); 631ac106addSNikolay Haustov default: break; 
632161a158eSNikolay Haustov } 633ac106addSNikolay Haustov return errOperand(Val, "unknown operand encoding " + Twine(Val)); 634161a158eSNikolay Haustov } 635161a158eSNikolay Haustov 636549c89d2SSam Kolton MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width, 637363f47a2SSam Kolton unsigned Val) const { 638363f47a2SSam Kolton using namespace AMDGPU::SDWA; 639363f47a2SSam Kolton 640549c89d2SSam Kolton if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) { 641a179d25bSSam Kolton // XXX: static_cast<int> is needed to avoid stupid warning: 642a179d25bSSam Kolton // compare with unsigned is always true 643a179d25bSSam Kolton if (SDWA9EncValues::SRC_VGPR_MIN <= static_cast<int>(Val) && 644363f47a2SSam Kolton Val <= SDWA9EncValues::SRC_VGPR_MAX) { 645363f47a2SSam Kolton return createRegOperand(getVgprClassId(Width), 646363f47a2SSam Kolton Val - SDWA9EncValues::SRC_VGPR_MIN); 647363f47a2SSam Kolton } 648363f47a2SSam Kolton if (SDWA9EncValues::SRC_SGPR_MIN <= Val && 649363f47a2SSam Kolton Val <= SDWA9EncValues::SRC_SGPR_MAX) { 650363f47a2SSam Kolton return createSRegOperand(getSgprClassId(Width), 651363f47a2SSam Kolton Val - SDWA9EncValues::SRC_SGPR_MIN); 652363f47a2SSam Kolton } 653363f47a2SSam Kolton 654363f47a2SSam Kolton return decodeSpecialReg32(Val - SDWA9EncValues::SRC_SGPR_MIN); 655549c89d2SSam Kolton } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) { 656549c89d2SSam Kolton return createRegOperand(getVgprClassId(Width), Val); 657549c89d2SSam Kolton } 658549c89d2SSam Kolton llvm_unreachable("unsupported target"); 659363f47a2SSam Kolton } 660363f47a2SSam Kolton 661549c89d2SSam Kolton MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const { 662549c89d2SSam Kolton return decodeSDWASrc(OPW16, Val); 663363f47a2SSam Kolton } 664363f47a2SSam Kolton 665549c89d2SSam Kolton MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const { 666549c89d2SSam Kolton return decodeSDWASrc(OPW32, Val); 667363f47a2SSam Kolton } 668363f47a2SSam Kolton 
669363f47a2SSam Kolton 670549c89d2SSam Kolton MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const { 671363f47a2SSam Kolton using namespace AMDGPU::SDWA; 672363f47a2SSam Kolton 673549c89d2SSam Kolton assert(STI.getFeatureBits()[AMDGPU::FeatureGFX9] && 674549c89d2SSam Kolton "SDWAVopcDst should be present only on GFX9"); 675363f47a2SSam Kolton if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) { 676363f47a2SSam Kolton Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK; 677363f47a2SSam Kolton if (Val > AMDGPU::EncValues::SGPR_MAX) { 678363f47a2SSam Kolton return decodeSpecialReg64(Val); 679363f47a2SSam Kolton } else { 680363f47a2SSam Kolton return createSRegOperand(getSgprClassId(OPW64), Val); 681363f47a2SSam Kolton } 682363f47a2SSam Kolton } else { 683363f47a2SSam Kolton return createRegOperand(AMDGPU::VCC); 684363f47a2SSam Kolton } 685363f47a2SSam Kolton } 686363f47a2SSam Kolton 6873381d7a2SSam Kolton //===----------------------------------------------------------------------===// 6883381d7a2SSam Kolton // AMDGPUSymbolizer 6893381d7a2SSam Kolton //===----------------------------------------------------------------------===// 6903381d7a2SSam Kolton 6913381d7a2SSam Kolton // Try to find symbol name for specified label 6923381d7a2SSam Kolton bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst, 6933381d7a2SSam Kolton raw_ostream &/*cStream*/, int64_t Value, 6943381d7a2SSam Kolton uint64_t /*Address*/, bool IsBranch, 6953381d7a2SSam Kolton uint64_t /*Offset*/, uint64_t /*InstSize*/) { 6963381d7a2SSam Kolton typedef std::tuple<uint64_t, StringRef, uint8_t> SymbolInfoTy; 6973381d7a2SSam Kolton typedef std::vector<SymbolInfoTy> SectionSymbolsTy; 6983381d7a2SSam Kolton 6993381d7a2SSam Kolton if (!IsBranch) { 7003381d7a2SSam Kolton return false; 7013381d7a2SSam Kolton } 7023381d7a2SSam Kolton 7033381d7a2SSam Kolton auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo); 7043381d7a2SSam Kolton auto Result = std::find_if(Symbols->begin(), Symbols->end(), 
7053381d7a2SSam Kolton [Value](const SymbolInfoTy& Val) { 7063381d7a2SSam Kolton return std::get<0>(Val) == static_cast<uint64_t>(Value) 7073381d7a2SSam Kolton && std::get<2>(Val) == ELF::STT_NOTYPE; 7083381d7a2SSam Kolton }); 7093381d7a2SSam Kolton if (Result != Symbols->end()) { 7103381d7a2SSam Kolton auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result)); 7113381d7a2SSam Kolton const auto *Add = MCSymbolRefExpr::create(Sym, Ctx); 7123381d7a2SSam Kolton Inst.addOperand(MCOperand::createExpr(Add)); 7133381d7a2SSam Kolton return true; 7143381d7a2SSam Kolton } 7153381d7a2SSam Kolton return false; 7163381d7a2SSam Kolton } 7173381d7a2SSam Kolton 71892b355b1SMatt Arsenault void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream, 71992b355b1SMatt Arsenault int64_t Value, 72092b355b1SMatt Arsenault uint64_t Address) { 72192b355b1SMatt Arsenault llvm_unreachable("unimplemented"); 72292b355b1SMatt Arsenault } 72392b355b1SMatt Arsenault 7243381d7a2SSam Kolton //===----------------------------------------------------------------------===// 7253381d7a2SSam Kolton // Initialization 7263381d7a2SSam Kolton //===----------------------------------------------------------------------===// 7273381d7a2SSam Kolton 7283381d7a2SSam Kolton static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/, 7293381d7a2SSam Kolton LLVMOpInfoCallback /*GetOpInfo*/, 7303381d7a2SSam Kolton LLVMSymbolLookupCallback /*SymbolLookUp*/, 7313381d7a2SSam Kolton void *DisInfo, 7323381d7a2SSam Kolton MCContext *Ctx, 7333381d7a2SSam Kolton std::unique_ptr<MCRelocationInfo> &&RelInfo) { 7343381d7a2SSam Kolton return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo); 7353381d7a2SSam Kolton } 7363381d7a2SSam Kolton 737e1818af8STom Stellard static MCDisassembler *createAMDGPUDisassembler(const Target &T, 738e1818af8STom Stellard const MCSubtargetInfo &STI, 739e1818af8STom Stellard MCContext &Ctx) { 740e1818af8STom Stellard return new AMDGPUDisassembler(STI, Ctx); 741e1818af8STom 
Stellard } 742e1818af8STom Stellard 743e1818af8STom Stellard extern "C" void LLVMInitializeAMDGPUDisassembler() { 744f42454b9SMehdi Amini TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(), 745f42454b9SMehdi Amini createAMDGPUDisassembler); 746f42454b9SMehdi Amini TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(), 747f42454b9SMehdi Amini createAMDGPUSymbolizer); 748e1818af8STom Stellard } 749