//===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "Disassembler/AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm-c/Disassembler.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

#define SGPR_MAX (isGFX10() ? AMDGPU::EncValues::SGPR_MAX_GFX10 \
                            : AMDGPU::EncValues::SGPR_MAX_SI)

using DecodeStatus = llvm::MCDisassembler::DecodeStatus;

inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

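// Insert Op at the index that the named operand NameIdx occupies for MI's
// opcode. If the opcode has no such operand, nothing is inserted and -1 is
// returned.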
static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                uint16_t NameIdx) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
  if (OpIdx != -1) {
    auto I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}

static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  // Our branches take a simm16, but we need two extra bits to account for the
  // factor of 4.
  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();
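  // For example, an encoded offset of -1 (0xffff) resolves to the address of
  // the branch itself: Addr + 4 + (-1 * 4) == Addr.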

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                      unsigned Imm, \
                                      uint64_t /*Addr*/, \
                                      const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)
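// For example, DECODE_OPERAND_REG(VGPR_32) defines
// DecodeVGPR_32RegisterClass(), the table-driven decoder callback that
// forwards the raw encoding to AMDGPUDisassembler::decodeOperand_VGPR_32()
// and appends the resulting operand to the instruction.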

DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VRegOrLds_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
DECODE_OPERAND_REG(SRegOrLds_32)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)

static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res =
      support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

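// Attempt to decode Inst with a single generated decoder table. On failure
// the Bytes view (which operand decoding may have advanced past a trailing
// literal) is restored so the caller can retry another table or encoding
// width.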
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;
  bool IsSDWA = false;

  // ToDo: AMDGPUDisassembler fully supports only VI and newer (GCN3-encoded
  // and GFX10) subtargets.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding] && !isGFX10())
    report_fatal_error("Disassembly not yet supported for subtarget");

  unsigned MaxInstBytesNum = (std::min)(
    STI.getFeatureBits()[AMDGPU::FeatureGFX10] ? (size_t) 20 :
    STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal] ? (size_t) 12 : (size_t)8,
    Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: it would be better to switch on the encoding length using some bit
    // predicate, but it is not known yet, so try everything we can.

    // Try to decode DPP and SDWA first to resolve the conflict with VOP1 and
    // VOP2 encodings.
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA1064, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
      // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
      // table first so we print the correct name.
      if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) {
        Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address);
        if (Res) break;
      }

      if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {
        Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address);
        if (Res)
          break;
      }
    }

    // Reinitialize Bytes as DPP64 could have eaten too much.
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try to decode a 32-bit instruction.
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableGFX832, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX1032, MI, DW, Address);
    if (Res) break;

    // Otherwise try a 64-bit encoding built from this dword and the next one.
    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableGFX864, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX1064, MI, QW, Address);
  } while (false);

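  // If 12 bytes were consumed but the decoded instruction is not a VOP3 that
  // actually read a 32-bit literal, the trailing dword is not part of this
  // instruction; report an 8-byte size instead.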
  if (Res && (MaxInstBytesNum - Bytes.size()) == 12 && (!HasLiteral ||
        !(MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3))) {
    MaxInstBytesNum = 8;
    Bytes = Bytes_.slice(0, MaxInstBytesNum);
    eatBytes<uint64_t>(Bytes);
  }

  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ||
              MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_FMAC_F16_e64_gfx10)) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
    Res = convertMIMGInst(MI);
  }

  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);

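  // Some instructions carry a tied vdst_in source that the generated decoder
  // does not populate. If it is missing or does not match the register it is
  // tied to, rebuild it as a copy of the tied operand so the MCInst stays
  // consistent.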
  int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                              AMDGPU::OpName::vdst_in);
  if (VDstIn_Idx != -1) {
    int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
                           MCOI::OperandConstraint::TIED_TO);
    if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
         !MI.getOperand(VDstIn_Idx).isReg() ||
         MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) {
      if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
        MI.erase(&MI.getOperand(VDstIn_Idx));
      insertNamedMCOperand(MI,
        MCOperand::createReg(MI.getOperand(Tied).getReg()),
        AMDGPU::OpName::vdst_in);
    }
  }

  // If the opcode was not recognized we'll assume a Size of 4 bytes
  // (unless there are fewer bytes left).
  Size = Res ? (MaxInstBytesNum - Bytes.size())
             : std::min((size_t)4, Bytes_.size());
  return Res;
}

DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
      STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}

// Note that the MIMG format provides no information about VADDR size.
// Consequently, decoded instructions always show the address as if it were a
// single dword, which may not actually be the case.
DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {

  int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdst);

  int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdata);

  int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::dmask);

  int TFEIdx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::tfe);
  int D16Idx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::d16);

  assert(VDataIdx != -1);
  assert(DMaskIdx != -1);
  assert(TFEIdx != -1);

  bool IsAtomic = (VDstIdx != -1);
  bool IsGather4 = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::Gather4;

  unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
  if (DMask == 0)
    return MCDisassembler::Success;

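  // Gather4 variants always return four dwords; otherwise the data size is
  // the number of channels enabled in the dmask.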
  unsigned DstSize = IsGather4 ? 4 : countPopulation(DMask);
  if (DstSize == 1)
    return MCDisassembler::Success;

  bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
  if (D16 && AMDGPU::hasPackedD16(STI)) {
    DstSize = (DstSize + 1) / 2;
  }

  // FIXME: Add tfe support
  if (MI.getOperand(TFEIdx).getImm())
    return MCDisassembler::Success;

  int NewOpcode = -1;

  if (IsGather4) {
    if (D16 && AMDGPU::hasPackedD16(STI))
      NewOpcode = AMDGPU::getMaskedMIMGOp(MI.getOpcode(), 2);
    else
      return MCDisassembler::Success;
  } else {
    NewOpcode = AMDGPU::getMaskedMIMGOp(MI.getOpcode(), DstSize);
    if (NewOpcode == -1)
      return MCDisassembler::Success;
  }

  auto RCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;

  // Get the first subregister of VData.
  unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
  unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
  Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;

  // Widen the register to the correct number of enabled channels.
  auto NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
                                          &MRI.getRegClass(RCID));
  if (NewVdata == AMDGPU::NoRegister) {
    // It's possible to encode this such that the low register + enabled
    // components exceeds the register count.
    return MCDisassembler::Success;
  }

  MI.setOpcode(NewOpcode);
  // vaddr will always appear as a single VGPR. This will look different from
  // how it is usually emitted because the number of register components is
  // not in the instruction encoding.
  MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);

  if (IsAtomic) {
    // Atomic operations have an additional operand (a copy of data).
    MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
  }

  return MCDisassembler::Success;
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we accept as much as we can and let the assembler sort it
  // out.
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::TTMP_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::TTMP_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VRegOrLds_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // The TableGen-generated disassembler doesn't care about operand types,
  // leaving only the register class, so an SSrc_32 operand turns into SReg_32;
  // therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0_XEXEC is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
  unsigned Val) const {
  // SReg_32_XEXEC_HI is SReg_32 without EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SRegOrLds_32(unsigned Val) const {
  // The TableGen-generated disassembler doesn't care about operand types,
  // leaving only the register class, so an SSrc_32 operand turns into SReg_32;
  // therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return decodeDstOp(OPW256, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return decodeDstOp(OPW512, Val);
}

MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are supposed to be unsigned integers.
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}

MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;

  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
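  // Values up to INLINE_INTEGER_C_POSITIVE_MAX decode to the non-negative
  // inline constants starting at 0; values above it decode to -1, -2, ...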
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
      // Cast prevents negative overflow.
}

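// The following helpers return the raw IEEE bit patterns of the hardware
// inline floating-point constants (+-0.5, +-1.0, +-2.0, +-4.0 and 1/(2*pi))
// for each operand width; the caller wraps them in an immediate operand.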
static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  case OPW256: return SGPR_256RegClassID;
  case OPW512: return SGPR_512RegClassID;
  }
}

unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  case OPW256: return TTMP_256RegClassID;
  case OPW512: return TTMP_512RegClassID;
  }
}

int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
  using namespace AMDGPU::EncValues;

  unsigned TTmpMin =
      (isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MIN : TTMP_VI_MIN;
  unsigned TTmpMax =
      (isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MAX : TTMP_VI_MAX;

  return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
}

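// Decode a 9-bit source operand: values up to SGPR_MAX select SGPRs, the
// TTMP window selects trap temporaries, the inline-integer and inline-float
// windows decode to inline constants, LITERAL_CONST requests the 32-bit
// literal that follows the instruction, VGPR_MIN..VGPR_MAX select VGPRs, and
// everything else is treated as a special register.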
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 512); // enum9

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

MCOperand AMDGPUDisassembler::decodeDstOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 128);
  assert(Width == OPW256 || Width == OPW512);

  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  llvm_unreachable("unknown dst register");
}

MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR_LO);
  case 103: return createRegOperand(FLAT_SCR_HI);
  case 104: return createRegOperand(XNACK_MASK_LO);
  case 105: return createRegOperand(XNACK_MASK_HI);
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 125: return createRegOperand(SGPR_NULL);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
    // ToDo: no support for vccz register
  case 251: break;
    // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  case 254: return createRegOperand(LDS_DIRECT);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR);
  case 104: return createRegOperand(XNACK_MASK);
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
                                            const unsigned Val) const {
  using namespace AMDGPU::SDWA;
  using namespace AMDGPU::EncValues;

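  // On GFX9/GFX10 the SDWA src field can name a VGPR, an SGPR or ttmp, an
  // inline constant, or a special register; on VI it always names a VGPR.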
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
      STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    // The cast to int avoids a "comparison is always true" warning on the
    // lower-bound check.
    if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
      return createRegOperand(getVgprClassId(Width),
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    }
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= (isGFX10() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
                          : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
      return createSRegOperand(getSgprClassId(Width),
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    }
    if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_TTMP_MAX) {
      return createSRegOperand(getTtmpClassId(Width),
                               Val - SDWA9EncValues::SRC_TTMP_MIN);
    }

    const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;

    if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
      return decodeIntImmed(SVal);

    if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
      return decodeFPImmed(Width, SVal);

    return decodeSpecialReg32(SVal);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    return createRegOperand(getVgprClassId(Width), Val);
  }
  llvm_unreachable("unsupported target");
}

MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
  using namespace AMDGPU::SDWA;

  assert((STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
          STI.getFeatureBits()[AMDGPU::FeatureGFX10]) &&
         "SDWAVopcDst should be present only on GFX9+");

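  // If the VCC-mask bit of the encoding is clear, the destination is the
  // implicit VCC; otherwise the masked-off low bits select an SGPR pair,
  // ttmp pair, or special register.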
  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;

    int TTmpIdx = getTTmpIdx(Val);
    if (TTmpIdx >= 0) {
      return createSRegOperand(getTtmpClassId(OPW64), TTmpIdx);
    } else if (Val > SGPR_MAX) {
      return decodeSpecialReg64(Val);
    } else {
      return createSRegOperand(getSgprClassId(OPW64), Val);
    }
  } else {
    return createRegOperand(AMDGPU::VCC);
  }
}

bool AMDGPUDisassembler::isVI() const {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool AMDGPUDisassembler::isGFX9() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

bool AMDGPUDisassembler::isGFX10() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find a symbol name for the specified label.
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  using SymbolInfoTy = std::tuple<uint64_t, StringRef, uint8_t>;
  using SectionSymbolsTy = std::vector<SymbolInfoTy>;
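  // Each SymbolInfoTy entry is (address, symbol name, ELF symbol type).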

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  if (!Symbols)
    return false;

  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                                return std::get<0>(Val) == static_cast<uint64_t>(Value)
                                    && std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
}

extern "C" void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}