//===-- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"

#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

typedef llvm::MCDisassembler::DecodeStatus DecodeStatus;

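// Append Opnd to Inst and report SoftFail if the operand is invalid (i.e. an
// error operand produced by one of the decode helpers below).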
inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

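// Insert Op at the position of the operand named NameIdx in MI. Returns that
// operand index, or -1 if MI's opcode has no such named operand.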
static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                uint16_t NameIdx) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
  if (OpIdx != -1) {
    auto I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}

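// Decode the SOPP branch offset. The encoded offset is in units of dwords and
// is relative to the instruction following the branch, so convert it to a byte
// address before trying to resolve it to a symbol; fall back to the raw
// immediate if no symbol is found.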
static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

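// DECODE_OPERAND defines a static trampoline with the signature expected by
// the generated decoder tables; it forwards the raw encoding to the named
// AMDGPUDisassembler method and appends the decoded operand to the
// instruction.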
#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                       unsigned Imm, \
                                       uint64_t /*Addr*/, \
                                       const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)

DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)

static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

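// Consume sizeof(T) bytes from the front of Bytes (little-endian) and return
// them as a value of type T.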
template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

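// Try to decode Inst against a single generated decoder table. On failure the
// Bytes window is restored so the caller can retry with another table (a
// literal constant may have been consumed by a partial decode).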
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;
  bool IsSDWA = false;

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding])
    report_fatal_error("Disassembly not yet supported for subtarget");

  const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: it would be better to switch on the encoding length using some bit
    // predicate, but that is not known yet, so try everything we can.

    // Try to decode DPP and SDWA first to resolve the conflict with the VOP1
    // and VOP2 encodings.
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }
    }

    // Reinitialize Bytes as DPP64 could have eaten too much.
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try to decode a 32-bit instruction.
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
  } while (false);

  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi)) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);

  Size = Res ? (MaxInstBytesNum - Bytes.size()) : 0;
  return Res;
}

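// Fix up SDWA instructions after table decoding by inserting operands that are
// implied rather than encoded: clamp for GFX9 VOPC, the VCC sdst for VI VOPC,
// and omod for VI VOP1/VOP2.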
DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, MCOperand::createReg(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(RegId);
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI has 102.
  // Valery: here we accept as much as we can; let the assembler sort it out.
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some operands that are ordinarily VSrc_32 are restricted to
  // VGPR_32, so clear the extra high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // The table-gen'd disassembler doesn't care about operand types; it keeps
  // only the register class, so an SSrc_32 operand turns into SReg_32, and
  // therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI.
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_256RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_512RegClassID, Val);
}

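// Read the 32-bit literal constant that follows the instruction encoding. The
// value is cached in Literal/HasLiteral so that a literal referenced by more
// than one operand is consumed from the byte stream only once.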
MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are supposed to be unsigned integers.
  // ToDo: deal with signed/unsigned 64-bit integer constants.
  // ToDo: deal with float/double constants.
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}

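// Decode an inline integer constant: the lower part of the encoding range maps
// to 0..64, the upper part to -1..-16.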
MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;
  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  // The cast prevents negative overflow.
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
}

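// The getInlineImmVal* helpers map the floating-point inline constant
// encodings (240..248) to the corresponding IEEE bit patterns at the requested
// operand width; decodeFPImmed below selects the helper by width.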
static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  }
}

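// Decode a 9-bit source operand encoding: VGPRs, SGPRs and TTMPs map to the
// register class selected by Width, inline integer and floating-point
// constants are expanded to immediates, the literal encoding reads the
// trailing dword, and everything else is treated as a special register.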
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;
  assert(Val < 512); // enum9

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }
  if (TTMP_MIN <= Val && Val <= TTMP_MAX) {
    return createSRegOperand(getTtmpClassId(Width), Val - TTMP_MIN);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

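// Map the remaining 32-bit scalar-source encodings to individual special
// registers (FLAT_SCRATCH halves, VCC_LO/HI, TBA/TMA halves, M0, EXEC_LO/HI,
// the aperture registers, SCC); unsupported encodings become error operands.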
MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR_LO, STI));
  case 103: return createRegOperand(getMCReg(FLAT_SCR_HI, STI));
    // ToDo: no support for xnack_mask_lo/_hi register
  case 104:
  case 105: break;
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
    // TODO: SRC_POPS_EXITING_WAVE_ID
    // ToDo: no support for vccz register
  case 251: break;
    // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR, STI));
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
                                            unsigned Val) const {
  using namespace AMDGPU::SDWA;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    // XXX: static_cast<int> is needed to avoid a "comparison with unsigned is
    // always true" warning.
    if (SDWA9EncValues::SRC_VGPR_MIN <= static_cast<int>(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
      return createRegOperand(getVgprClassId(Width),
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    }
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_SGPR_MAX) {
      return createSRegOperand(getSgprClassId(Width),
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    }

    return decodeSpecialReg32(Val - SDWA9EncValues::SRC_SGPR_MIN);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    return createRegOperand(getVgprClassId(Width), Val);
  }
  llvm_unreachable("unsupported target");
}

MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}

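// Decode the sdst field of a GFX9 SDWA VOPC instruction: depending on the
// VCC-mask bit it either names an SGPR pair (or a 64-bit special register) or
// defaults to VCC.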
MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
  using namespace AMDGPU::SDWA;

  assert(STI.getFeatureBits()[AMDGPU::FeatureGFX9] &&
         "SDWAVopcDst should be present only on GFX9");
  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    if (Val > AMDGPU::EncValues::SGPR_MAX) {
      return decodeSpecialReg64(Val);
    } else {
      return createSRegOperand(getSgprClassId(OPW64), Val);
    }
  } else {
    return createRegOperand(AMDGPU::VCC);
  }
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find a symbol name for the specified label.
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  typedef std::tuple<uint64_t, StringRef, uint8_t> SymbolInfoTy;
  typedef std::vector<SymbolInfoTy> SectionSymbolsTy;

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                                return std::get<0>(Val) == static_cast<uint64_t>(Value)
                                    && std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx);
}

extern "C" void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}