1e1818af8STom Stellard //===-- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA --------------===//
2e1818af8STom Stellard //
3e1818af8STom Stellard //                     The LLVM Compiler Infrastructure
4e1818af8STom Stellard //
5e1818af8STom Stellard // This file is distributed under the University of Illinois Open Source
6e1818af8STom Stellard // License. See LICENSE.TXT for details.
7e1818af8STom Stellard //
8e1818af8STom Stellard //===----------------------------------------------------------------------===//
9e1818af8STom Stellard //
10e1818af8STom Stellard //===----------------------------------------------------------------------===//
11e1818af8STom Stellard //
12e1818af8STom Stellard /// \file
13e1818af8STom Stellard ///
14e1818af8STom Stellard /// This file contains definition for AMDGPU ISA disassembler
15e1818af8STom Stellard //
16e1818af8STom Stellard //===----------------------------------------------------------------------===//
17e1818af8STom Stellard 
18e1818af8STom Stellard // ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?
19e1818af8STom Stellard 
20e1818af8STom Stellard #include "AMDGPUDisassembler.h"
21e1818af8STom Stellard #include "AMDGPU.h"
22e1818af8STom Stellard #include "AMDGPURegisterInfo.h"
23212a251cSArtem Tamazov #include "SIDefines.h"
24e1818af8STom Stellard #include "Utils/AMDGPUBaseInfo.h"
25678e111eSMatt Arsenault #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
26e1818af8STom Stellard 
27ac106addSNikolay Haustov #include "llvm/MC/MCContext.h"
28e1818af8STom Stellard #include "llvm/MC/MCFixedLenDisassembler.h"
29e1818af8STom Stellard #include "llvm/MC/MCInst.h"
30e1818af8STom Stellard #include "llvm/MC/MCInstrDesc.h"
31e1818af8STom Stellard #include "llvm/MC/MCSubtargetInfo.h"
323381d7a2SSam Kolton #include "llvm/Support/ELF.h"
33ac106addSNikolay Haustov #include "llvm/Support/Endian.h"
34e1818af8STom Stellard #include "llvm/Support/Debug.h"
35e1818af8STom Stellard #include "llvm/Support/TargetRegistry.h"
36e1818af8STom Stellard 
37e1818af8STom Stellard 
38e1818af8STom Stellard using namespace llvm;
39e1818af8STom Stellard 
40e1818af8STom Stellard #define DEBUG_TYPE "amdgpu-disassembler"
41e1818af8STom Stellard 
42e1818af8STom Stellard typedef llvm::MCDisassembler::DecodeStatus DecodeStatus;
43e1818af8STom Stellard 
44e1818af8STom Stellard 
45ac106addSNikolay Haustov inline static MCDisassembler::DecodeStatus
46ac106addSNikolay Haustov addOperand(MCInst &Inst, const MCOperand& Opnd) {
47ac106addSNikolay Haustov   Inst.addOperand(Opnd);
48ac106addSNikolay Haustov   return Opnd.isValid() ?
49ac106addSNikolay Haustov     MCDisassembler::Success :
50ac106addSNikolay Haustov     MCDisassembler::SoftFail;
51e1818af8STom Stellard }
52e1818af8STom Stellard 
533381d7a2SSam Kolton static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
543381d7a2SSam Kolton                                        uint64_t Addr, const void *Decoder) {
553381d7a2SSam Kolton   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
563381d7a2SSam Kolton 
573381d7a2SSam Kolton   APInt SignedOffset(18, Imm * 4, true);
583381d7a2SSam Kolton   int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();
593381d7a2SSam Kolton 
603381d7a2SSam Kolton   if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
613381d7a2SSam Kolton     return MCDisassembler::Success;
623381d7a2SSam Kolton   return addOperand(Inst, MCOperand::createImm(Imm));
633381d7a2SSam Kolton }
643381d7a2SSam Kolton 
// DECODE_OPERAND generates a static decoder callback with the signature the
// TableGen'erated tables expect, named StaticDecoderName. It forwards the
// raw encoded value to the AMDGPUDisassembler method DecoderName and appends
// the resulting operand to the instruction.
#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                       unsigned Imm, \
                                       uint64_t /*Addr*/, \
                                       const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

// Shorthand for register-class operands: Decode<RC>RegisterClass is the
// callback name TableGen emits for register class RC; decodeOperand_<RC> is
// the matching AMDGPUDisassembler method.
#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)

// Vector registers and vector/scalar source operands.
DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

// Scalar registers and scalar source operands.
DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)
92e1818af8STom Stellard 
934bd72361SMatt Arsenault 
944bd72361SMatt Arsenault static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
954bd72361SMatt Arsenault                                          unsigned Imm,
964bd72361SMatt Arsenault                                          uint64_t Addr,
974bd72361SMatt Arsenault                                          const void *Decoder) {
984bd72361SMatt Arsenault   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
994bd72361SMatt Arsenault   return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
1004bd72361SMatt Arsenault }
1014bd72361SMatt Arsenault 
1029be7b0d4SMatt Arsenault static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
1039be7b0d4SMatt Arsenault                                          unsigned Imm,
1049be7b0d4SMatt Arsenault                                          uint64_t Addr,
1059be7b0d4SMatt Arsenault                                          const void *Decoder) {
1069be7b0d4SMatt Arsenault   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
1079be7b0d4SMatt Arsenault   return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
1089be7b0d4SMatt Arsenault }
1099be7b0d4SMatt Arsenault 
// SDWA9 operand decoders: the static callback and the AMDGPUDisassembler
// method intentionally share the same name.
#define DECODE_SDWA9(DecName) \
DECODE_OPERAND(decodeSDWA9##DecName, decodeSDWA9##DecName)

DECODE_SDWA9(Src32)
DECODE_SDWA9(Src16)
DECODE_SDWA9(VopcDst)
116*363f47a2SSam Kolton 
117e1818af8STom Stellard #include "AMDGPUGenDisassemblerTables.inc"
118e1818af8STom Stellard 
119e1818af8STom Stellard //===----------------------------------------------------------------------===//
120e1818af8STom Stellard //
121e1818af8STom Stellard //===----------------------------------------------------------------------===//
122e1818af8STom Stellard 
1231048fb18SSam Kolton template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
1241048fb18SSam Kolton   assert(Bytes.size() >= sizeof(T));
1251048fb18SSam Kolton   const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
1261048fb18SSam Kolton   Bytes = Bytes.slice(sizeof(T));
127ac106addSNikolay Haustov   return Res;
128ac106addSNikolay Haustov }
129ac106addSNikolay Haustov 
// Attempt to decode the raw encoding Inst against one TableGen'erated decoder
// table. On success the result is committed to MI; on failure MI is left
// untouched and the byte-stream position (the mutable member Bytes, which
// decoders may advance to consume a trailing literal) is restored so the
// caller can try another table.
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false; // Invalidate literal cached by a previous attempt.
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}
146ac106addSNikolay Haustov 
// Top-level decode entry point: decode one instruction from Bytes_ at
// Address. 64-bit DPP/SDWA tables are tried first (their encodings overlap
// VOP1/VOP2), then the 32-bit tables, then the generic 64-bit tables. On
// success, Size is the number of bytes consumed (including any trailing
// literal dword); on failure it is 0.
DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding])
    report_fatal_error("Disassembly not yet supported for subtarget");

  // An instruction encoding is at most 8 bytes; the tryDecodeInst calls
  // below consume from the mutable member Bytes.
  const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: better to switch encoding length using some bit predicate
    // but it is unknown yet, so try all we can

    // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
    // encodings
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) break;
    }

    // Reinitialize Bytes as DPP64 could have eaten too much
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try decode 32-bit instruction
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    // Fall back to the 64-bit tables, using the next dword as the high half.
    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
  } while (false);

  // NOTE(review): the V_MAC_* e64 forms decode without a src2_modifiers
  // operand; a placeholder is inserted so operand indices line up with the
  // MCInstrDesc — presumably the encoding carries no field for it; confirm
  // against the instruction definitions.
  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi)) {
    // Insert dummy unused src2_modifiers.
    int Src2ModIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                                AMDGPU::OpName::src2_modifiers);
    auto I = MI.begin();
    std::advance(I, Src2ModIdx);
    MI.insert(I, MCOperand::createImm(0));
  }

  // Consumed = initial window minus what is left unread.
  Size = Res ? (MaxInstBytesNum - Bytes.size()) : 0;
  return Res;
}
214e1818af8STom Stellard 
215ac106addSNikolay Haustov const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
216ac106addSNikolay Haustov   return getContext().getRegisterInfo()->
217ac106addSNikolay Haustov     getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
218e1818af8STom Stellard }
219e1818af8STom Stellard 
220ac106addSNikolay Haustov inline
221ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::errOperand(unsigned V,
222ac106addSNikolay Haustov                                          const Twine& ErrMsg) const {
223ac106addSNikolay Haustov   *CommentStream << "Error: " + ErrMsg;
224ac106addSNikolay Haustov 
225ac106addSNikolay Haustov   // ToDo: add support for error operands to MCInst.h
226ac106addSNikolay Haustov   // return MCOperand::createError(V);
227ac106addSNikolay Haustov   return MCOperand();
228ac106addSNikolay Haustov }
229ac106addSNikolay Haustov 
// Wrap an already-resolved physical register number in an MCOperand.
inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(RegId);
}
234ac106addSNikolay Haustov 
235ac106addSNikolay Haustov inline
236ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
237ac106addSNikolay Haustov                                                unsigned Val) const {
238ac106addSNikolay Haustov   const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
239ac106addSNikolay Haustov   if (Val >= RegCl.getNumRegs())
240ac106addSNikolay Haustov     return errOperand(Val, Twine(getRegClassName(RegClassID)) +
241ac106addSNikolay Haustov                            ": unknown register " + Twine(Val));
242ac106addSNikolay Haustov   return createRegOperand(RegCl.getRegister(Val));
243ac106addSNikolay Haustov }
244ac106addSNikolay Haustov 
// Build a scalar-register operand. Val is the encoded register number; for
// the wider (64/128/256/512-bit) classes the encoding addresses aligned
// tuples, so Val is scaled down by a class-dependent shift before indexing
// the register class.
inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we accepting as much as we can, let assembler sort it out
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  // Misalignment is accepted (see note above) but flagged for the reader.
  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}
282ac106addSNikolay Haustov 
// VS_32/VS_64: 32/64-bit vector-or-scalar source operands, decoded through
// the generic source-operand path (registers, inline constants, literals).
MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

// 16-bit source operand.
MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

// Packed pair of 16-bit values as a single source operand.
MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}
2989be7b0d4SMatt Arsenault 
299ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
300cb540bc0SMatt Arsenault   // Some instructions have operand restrictions beyond what the encoding
301cb540bc0SMatt Arsenault   // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
302cb540bc0SMatt Arsenault   // high bit.
303cb540bc0SMatt Arsenault   Val &= 255;
304cb540bc0SMatt Arsenault 
305ac106addSNikolay Haustov   return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
306ac106addSNikolay Haustov }
307ac106addSNikolay Haustov 
// Plain VGPR tuples: Val is the base VGPR number, passed straight to the
// register class (no immediate/literal forms here).
MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}
319ac106addSNikolay Haustov 
MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // table-gen generated disassembler doesn't care about operand types
  // leaving only registry class so SSrc_32 operand turns into SReg_32
  // and therefore we accept immediates and literals here as well
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

// The _XEXEC restriction is not enforced here; both 64-bit scalar classes
// take the common source-operand path.
MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

// 256/512-bit scalar tuples bypass decodeSrcOp and go straight to
// createSRegOperand — presumably no immediate forms exist at these widths;
// Val is the raw (aligned) encoded register number.
MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_256RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_512RegClassID, Val);
}
352ac106addSNikolay Haustov 
353ac106addSNikolay Haustov 
// Read the 32-bit literal constant dword that follows the instruction
// encoding. The value is cached in HasLiteral/Literal so that, within one
// decode attempt, several operands referencing the literal consume the
// dword from Bytes only once (the cache is reset in tryDecodeInst).
MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are supposed to be unsigned integer
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}
368ac106addSNikolay Haustov 
369ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
370212a251cSArtem Tamazov   using namespace AMDGPU::EncValues;
371212a251cSArtem Tamazov   assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
372212a251cSArtem Tamazov   return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
373212a251cSArtem Tamazov     (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
374212a251cSArtem Tamazov     (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
375212a251cSArtem Tamazov       // Cast prevents negative overflow.
376ac106addSNikolay Haustov }
377ac106addSNikolay Haustov 
3784bd72361SMatt Arsenault static int64_t getInlineImmVal32(unsigned Imm) {
3794bd72361SMatt Arsenault   switch (Imm) {
3804bd72361SMatt Arsenault   case 240:
3814bd72361SMatt Arsenault     return FloatToBits(0.5f);
3824bd72361SMatt Arsenault   case 241:
3834bd72361SMatt Arsenault     return FloatToBits(-0.5f);
3844bd72361SMatt Arsenault   case 242:
3854bd72361SMatt Arsenault     return FloatToBits(1.0f);
3864bd72361SMatt Arsenault   case 243:
3874bd72361SMatt Arsenault     return FloatToBits(-1.0f);
3884bd72361SMatt Arsenault   case 244:
3894bd72361SMatt Arsenault     return FloatToBits(2.0f);
3904bd72361SMatt Arsenault   case 245:
3914bd72361SMatt Arsenault     return FloatToBits(-2.0f);
3924bd72361SMatt Arsenault   case 246:
3934bd72361SMatt Arsenault     return FloatToBits(4.0f);
3944bd72361SMatt Arsenault   case 247:
3954bd72361SMatt Arsenault     return FloatToBits(-4.0f);
3964bd72361SMatt Arsenault   case 248: // 1 / (2 * PI)
3974bd72361SMatt Arsenault     return 0x3e22f983;
3984bd72361SMatt Arsenault   default:
3994bd72361SMatt Arsenault     llvm_unreachable("invalid fp inline imm");
4004bd72361SMatt Arsenault   }
4014bd72361SMatt Arsenault }
4024bd72361SMatt Arsenault 
4034bd72361SMatt Arsenault static int64_t getInlineImmVal64(unsigned Imm) {
4044bd72361SMatt Arsenault   switch (Imm) {
4054bd72361SMatt Arsenault   case 240:
4064bd72361SMatt Arsenault     return DoubleToBits(0.5);
4074bd72361SMatt Arsenault   case 241:
4084bd72361SMatt Arsenault     return DoubleToBits(-0.5);
4094bd72361SMatt Arsenault   case 242:
4104bd72361SMatt Arsenault     return DoubleToBits(1.0);
4114bd72361SMatt Arsenault   case 243:
4124bd72361SMatt Arsenault     return DoubleToBits(-1.0);
4134bd72361SMatt Arsenault   case 244:
4144bd72361SMatt Arsenault     return DoubleToBits(2.0);
4154bd72361SMatt Arsenault   case 245:
4164bd72361SMatt Arsenault     return DoubleToBits(-2.0);
4174bd72361SMatt Arsenault   case 246:
4184bd72361SMatt Arsenault     return DoubleToBits(4.0);
4194bd72361SMatt Arsenault   case 247:
4204bd72361SMatt Arsenault     return DoubleToBits(-4.0);
4214bd72361SMatt Arsenault   case 248: // 1 / (2 * PI)
4224bd72361SMatt Arsenault     return 0x3fc45f306dc9c882;
4234bd72361SMatt Arsenault   default:
4244bd72361SMatt Arsenault     llvm_unreachable("invalid fp inline imm");
4254bd72361SMatt Arsenault   }
4264bd72361SMatt Arsenault }
4274bd72361SMatt Arsenault 
4284bd72361SMatt Arsenault static int64_t getInlineImmVal16(unsigned Imm) {
4294bd72361SMatt Arsenault   switch (Imm) {
4304bd72361SMatt Arsenault   case 240:
4314bd72361SMatt Arsenault     return 0x3800;
4324bd72361SMatt Arsenault   case 241:
4334bd72361SMatt Arsenault     return 0xB800;
4344bd72361SMatt Arsenault   case 242:
4354bd72361SMatt Arsenault     return 0x3C00;
4364bd72361SMatt Arsenault   case 243:
4374bd72361SMatt Arsenault     return 0xBC00;
4384bd72361SMatt Arsenault   case 244:
4394bd72361SMatt Arsenault     return 0x4000;
4404bd72361SMatt Arsenault   case 245:
4414bd72361SMatt Arsenault     return 0xC000;
4424bd72361SMatt Arsenault   case 246:
4434bd72361SMatt Arsenault     return 0x4400;
4444bd72361SMatt Arsenault   case 247:
4454bd72361SMatt Arsenault     return 0xC400;
4464bd72361SMatt Arsenault   case 248: // 1 / (2 * PI)
4474bd72361SMatt Arsenault     return 0x3118;
4484bd72361SMatt Arsenault   default:
4494bd72361SMatt Arsenault     llvm_unreachable("invalid fp inline imm");
4504bd72361SMatt Arsenault   }
4514bd72361SMatt Arsenault }
4524bd72361SMatt Arsenault 
4534bd72361SMatt Arsenault MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
454212a251cSArtem Tamazov   assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
455212a251cSArtem Tamazov       && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);
4564bd72361SMatt Arsenault 
457e1818af8STom Stellard   // ToDo: case 248: 1/(2*PI) - is allowed only on VI
4584bd72361SMatt Arsenault   switch (Width) {
4594bd72361SMatt Arsenault   case OPW32:
4604bd72361SMatt Arsenault     return MCOperand::createImm(getInlineImmVal32(Imm));
4614bd72361SMatt Arsenault   case OPW64:
4624bd72361SMatt Arsenault     return MCOperand::createImm(getInlineImmVal64(Imm));
4634bd72361SMatt Arsenault   case OPW16:
4649be7b0d4SMatt Arsenault   case OPWV216:
4654bd72361SMatt Arsenault     return MCOperand::createImm(getInlineImmVal16(Imm));
4664bd72361SMatt Arsenault   default:
4674bd72361SMatt Arsenault     llvm_unreachable("implement me");
468e1818af8STom Stellard   }
469e1818af8STom Stellard }
470e1818af8STom Stellard 
471212a251cSArtem Tamazov unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
472e1818af8STom Stellard   using namespace AMDGPU;
473212a251cSArtem Tamazov   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
474212a251cSArtem Tamazov   switch (Width) {
475212a251cSArtem Tamazov   default: // fall
4764bd72361SMatt Arsenault   case OPW32:
4774bd72361SMatt Arsenault   case OPW16:
4789be7b0d4SMatt Arsenault   case OPWV216:
4794bd72361SMatt Arsenault     return VGPR_32RegClassID;
480212a251cSArtem Tamazov   case OPW64: return VReg_64RegClassID;
481212a251cSArtem Tamazov   case OPW128: return VReg_128RegClassID;
482212a251cSArtem Tamazov   }
483212a251cSArtem Tamazov }
484212a251cSArtem Tamazov 
485212a251cSArtem Tamazov unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
486212a251cSArtem Tamazov   using namespace AMDGPU;
487212a251cSArtem Tamazov   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
488212a251cSArtem Tamazov   switch (Width) {
489212a251cSArtem Tamazov   default: // fall
4904bd72361SMatt Arsenault   case OPW32:
4914bd72361SMatt Arsenault   case OPW16:
4929be7b0d4SMatt Arsenault   case OPWV216:
4934bd72361SMatt Arsenault     return SGPR_32RegClassID;
494212a251cSArtem Tamazov   case OPW64: return SGPR_64RegClassID;
495212a251cSArtem Tamazov   case OPW128: return SGPR_128RegClassID;
496212a251cSArtem Tamazov   }
497212a251cSArtem Tamazov }
498212a251cSArtem Tamazov 
499212a251cSArtem Tamazov unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
500212a251cSArtem Tamazov   using namespace AMDGPU;
501212a251cSArtem Tamazov   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
502212a251cSArtem Tamazov   switch (Width) {
503212a251cSArtem Tamazov   default: // fall
5044bd72361SMatt Arsenault   case OPW32:
5054bd72361SMatt Arsenault   case OPW16:
5069be7b0d4SMatt Arsenault   case OPWV216:
5074bd72361SMatt Arsenault     return TTMP_32RegClassID;
508212a251cSArtem Tamazov   case OPW64: return TTMP_64RegClassID;
509212a251cSArtem Tamazov   case OPW128: return TTMP_128RegClassID;
510212a251cSArtem Tamazov   }
511212a251cSArtem Tamazov }
512212a251cSArtem Tamazov 
513212a251cSArtem Tamazov MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
514212a251cSArtem Tamazov   using namespace AMDGPU::EncValues;
515ac106addSNikolay Haustov   assert(Val < 512); // enum9
516ac106addSNikolay Haustov 
517212a251cSArtem Tamazov   if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
518212a251cSArtem Tamazov     return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
519212a251cSArtem Tamazov   }
520b49c3361SArtem Tamazov   if (Val <= SGPR_MAX) {
521b49c3361SArtem Tamazov     assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
522212a251cSArtem Tamazov     return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
523212a251cSArtem Tamazov   }
524212a251cSArtem Tamazov   if (TTMP_MIN <= Val && Val <= TTMP_MAX) {
525212a251cSArtem Tamazov     return createSRegOperand(getTtmpClassId(Width), Val - TTMP_MIN);
526212a251cSArtem Tamazov   }
527ac106addSNikolay Haustov 
5284bd72361SMatt Arsenault   assert(Width == OPW16 || Width == OPW32 || Width == OPW64);
529212a251cSArtem Tamazov 
530212a251cSArtem Tamazov   if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
531ac106addSNikolay Haustov     return decodeIntImmed(Val);
532ac106addSNikolay Haustov 
533212a251cSArtem Tamazov   if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
5344bd72361SMatt Arsenault     return decodeFPImmed(Width, Val);
535ac106addSNikolay Haustov 
536212a251cSArtem Tamazov   if (Val == LITERAL_CONST)
537ac106addSNikolay Haustov     return decodeLiteralConstant();
538ac106addSNikolay Haustov 
5394bd72361SMatt Arsenault   switch (Width) {
5404bd72361SMatt Arsenault   case OPW32:
5414bd72361SMatt Arsenault   case OPW16:
5429be7b0d4SMatt Arsenault   case OPWV216:
5434bd72361SMatt Arsenault     return decodeSpecialReg32(Val);
5444bd72361SMatt Arsenault   case OPW64:
5454bd72361SMatt Arsenault     return decodeSpecialReg64(Val);
5464bd72361SMatt Arsenault   default:
5474bd72361SMatt Arsenault     llvm_unreachable("unexpected immediate type");
5484bd72361SMatt Arsenault   }
549ac106addSNikolay Haustov }
550ac106addSNikolay Haustov 
// Map the remaining 32-bit source-operand encodings to special registers.
// The case values are the hardware operand numbers from the ISA's scalar
// source encoding; unsupported ones fall through to an error operand.
MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR_LO, STI));
  case 103: return createRegOperand(getMCReg(FLAT_SCR_HI, STI));
    // ToDo: no support for xnack_mask_lo/_hi register
  case 104:
  case 105: break;
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
    // TODO: SRC_POPS_EXITING_WAVE_ID
    // ToDo: no support for vccz register
  case 251: break;
    // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}
582e1818af8STom Stellard 
583ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
584161a158eSNikolay Haustov   using namespace AMDGPU;
585161a158eSNikolay Haustov   switch (Val) {
586ac106addSNikolay Haustov   case 102: return createRegOperand(getMCReg(FLAT_SCR, STI));
587ac106addSNikolay Haustov   case 106: return createRegOperand(VCC);
588212a251cSArtem Tamazov   case 108: return createRegOperand(TBA);
589212a251cSArtem Tamazov   case 110: return createRegOperand(TMA);
590ac106addSNikolay Haustov   case 126: return createRegOperand(EXEC);
591ac106addSNikolay Haustov   default: break;
592161a158eSNikolay Haustov   }
593ac106addSNikolay Haustov   return errOperand(Val, "unknown operand encoding " + Twine(Val));
594161a158eSNikolay Haustov }
595161a158eSNikolay Haustov 
596*363f47a2SSam Kolton MCOperand AMDGPUDisassembler::decodeSDWA9Src(const OpWidthTy Width,
597*363f47a2SSam Kolton                                              unsigned Val) const {
598*363f47a2SSam Kolton   using namespace AMDGPU::SDWA;
599*363f47a2SSam Kolton 
600*363f47a2SSam Kolton   if (SDWA9EncValues::SRC_VGPR_MIN <= Val &&
601*363f47a2SSam Kolton       Val <= SDWA9EncValues::SRC_VGPR_MAX) {
602*363f47a2SSam Kolton     return createRegOperand(getVgprClassId(Width),
603*363f47a2SSam Kolton                             Val - SDWA9EncValues::SRC_VGPR_MIN);
604*363f47a2SSam Kolton   }
605*363f47a2SSam Kolton   if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
606*363f47a2SSam Kolton       Val <= SDWA9EncValues::SRC_SGPR_MAX) {
607*363f47a2SSam Kolton     return createSRegOperand(getSgprClassId(Width),
608*363f47a2SSam Kolton                              Val - SDWA9EncValues::SRC_SGPR_MIN);
609*363f47a2SSam Kolton   }
610*363f47a2SSam Kolton 
611*363f47a2SSam Kolton   return decodeSpecialReg32(Val - SDWA9EncValues::SRC_SGPR_MIN);
612*363f47a2SSam Kolton }
613*363f47a2SSam Kolton 
614*363f47a2SSam Kolton MCOperand AMDGPUDisassembler::decodeSDWA9Src16(unsigned Val) const {
615*363f47a2SSam Kolton   return decodeSDWA9Src(OPW16, Val);
616*363f47a2SSam Kolton }
617*363f47a2SSam Kolton 
618*363f47a2SSam Kolton MCOperand AMDGPUDisassembler::decodeSDWA9Src32(unsigned Val) const {
619*363f47a2SSam Kolton   return decodeSDWA9Src(OPW32, Val);
620*363f47a2SSam Kolton }
621*363f47a2SSam Kolton 
622*363f47a2SSam Kolton 
623*363f47a2SSam Kolton MCOperand AMDGPUDisassembler::decodeSDWA9VopcDst(unsigned Val) const {
624*363f47a2SSam Kolton   using namespace AMDGPU::SDWA;
625*363f47a2SSam Kolton 
626*363f47a2SSam Kolton   if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
627*363f47a2SSam Kolton     Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
628*363f47a2SSam Kolton     if (Val > AMDGPU::EncValues::SGPR_MAX) {
629*363f47a2SSam Kolton       return decodeSpecialReg64(Val);
630*363f47a2SSam Kolton     } else {
631*363f47a2SSam Kolton       return createSRegOperand(getSgprClassId(OPW64), Val);
632*363f47a2SSam Kolton     }
633*363f47a2SSam Kolton   } else {
634*363f47a2SSam Kolton     return createRegOperand(AMDGPU::VCC);
635*363f47a2SSam Kolton   }
636*363f47a2SSam Kolton }
637*363f47a2SSam Kolton 
6383381d7a2SSam Kolton //===----------------------------------------------------------------------===//
6393381d7a2SSam Kolton // AMDGPUSymbolizer
6403381d7a2SSam Kolton //===----------------------------------------------------------------------===//
6413381d7a2SSam Kolton 
6423381d7a2SSam Kolton // Try to find symbol name for specified label
6433381d7a2SSam Kolton bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
6443381d7a2SSam Kolton                                 raw_ostream &/*cStream*/, int64_t Value,
6453381d7a2SSam Kolton                                 uint64_t /*Address*/, bool IsBranch,
6463381d7a2SSam Kolton                                 uint64_t /*Offset*/, uint64_t /*InstSize*/) {
6473381d7a2SSam Kolton   typedef std::tuple<uint64_t, StringRef, uint8_t> SymbolInfoTy;
6483381d7a2SSam Kolton   typedef std::vector<SymbolInfoTy> SectionSymbolsTy;
6493381d7a2SSam Kolton 
6503381d7a2SSam Kolton   if (!IsBranch) {
6513381d7a2SSam Kolton     return false;
6523381d7a2SSam Kolton   }
6533381d7a2SSam Kolton 
6543381d7a2SSam Kolton   auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
6553381d7a2SSam Kolton   auto Result = std::find_if(Symbols->begin(), Symbols->end(),
6563381d7a2SSam Kolton                              [Value](const SymbolInfoTy& Val) {
6573381d7a2SSam Kolton                                 return std::get<0>(Val) == static_cast<uint64_t>(Value)
6583381d7a2SSam Kolton                                     && std::get<2>(Val) == ELF::STT_NOTYPE;
6593381d7a2SSam Kolton                              });
6603381d7a2SSam Kolton   if (Result != Symbols->end()) {
6613381d7a2SSam Kolton     auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
6623381d7a2SSam Kolton     const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
6633381d7a2SSam Kolton     Inst.addOperand(MCOperand::createExpr(Add));
6643381d7a2SSam Kolton     return true;
6653381d7a2SSam Kolton   }
6663381d7a2SSam Kolton   return false;
6673381d7a2SSam Kolton }
6683381d7a2SSam Kolton 
66992b355b1SMatt Arsenault void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
67092b355b1SMatt Arsenault                                                        int64_t Value,
67192b355b1SMatt Arsenault                                                        uint64_t Address) {
67292b355b1SMatt Arsenault   llvm_unreachable("unimplemented");
67392b355b1SMatt Arsenault }
67492b355b1SMatt Arsenault 
6753381d7a2SSam Kolton //===----------------------------------------------------------------------===//
6763381d7a2SSam Kolton // Initialization
6773381d7a2SSam Kolton //===----------------------------------------------------------------------===//
6783381d7a2SSam Kolton 
6793381d7a2SSam Kolton static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
6803381d7a2SSam Kolton                               LLVMOpInfoCallback /*GetOpInfo*/,
6813381d7a2SSam Kolton                               LLVMSymbolLookupCallback /*SymbolLookUp*/,
6823381d7a2SSam Kolton                               void *DisInfo,
6833381d7a2SSam Kolton                               MCContext *Ctx,
6843381d7a2SSam Kolton                               std::unique_ptr<MCRelocationInfo> &&RelInfo) {
6853381d7a2SSam Kolton   return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
6863381d7a2SSam Kolton }
6873381d7a2SSam Kolton 
688e1818af8STom Stellard static MCDisassembler *createAMDGPUDisassembler(const Target &T,
689e1818af8STom Stellard                                                 const MCSubtargetInfo &STI,
690e1818af8STom Stellard                                                 MCContext &Ctx) {
691e1818af8STom Stellard   return new AMDGPUDisassembler(STI, Ctx);
692e1818af8STom Stellard }
693e1818af8STom Stellard 
694e1818af8STom Stellard extern "C" void LLVMInitializeAMDGPUDisassembler() {
695f42454b9SMehdi Amini   TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
696f42454b9SMehdi Amini                                          createAMDGPUDisassembler);
697f42454b9SMehdi Amini   TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
698f42454b9SMehdi Amini                                        createAMDGPUSymbolizer);
699e1818af8STom Stellard }
700