1c8fbf6ffSEugene Zelenko //===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
2e1818af8STom Stellard //
3e1818af8STom Stellard //                     The LLVM Compiler Infrastructure
4e1818af8STom Stellard //
5e1818af8STom Stellard // This file is distributed under the University of Illinois Open Source
6e1818af8STom Stellard // License. See LICENSE.TXT for details.
7e1818af8STom Stellard //
8e1818af8STom Stellard //===----------------------------------------------------------------------===//
9e1818af8STom Stellard //
10e1818af8STom Stellard //===----------------------------------------------------------------------===//
11e1818af8STom Stellard //
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
16e1818af8STom Stellard //===----------------------------------------------------------------------===//
17e1818af8STom Stellard 
18e1818af8STom Stellard // ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?
19e1818af8STom Stellard 
20c8fbf6ffSEugene Zelenko #include "Disassembler/AMDGPUDisassembler.h"
21e1818af8STom Stellard #include "AMDGPU.h"
22e1818af8STom Stellard #include "AMDGPURegisterInfo.h"
23212a251cSArtem Tamazov #include "SIDefines.h"
24e1818af8STom Stellard #include "Utils/AMDGPUBaseInfo.h"
25c8fbf6ffSEugene Zelenko #include "llvm-c/Disassembler.h"
26c8fbf6ffSEugene Zelenko #include "llvm/ADT/APInt.h"
27c8fbf6ffSEugene Zelenko #include "llvm/ADT/ArrayRef.h"
28c8fbf6ffSEugene Zelenko #include "llvm/ADT/Twine.h"
29264b5d9eSZachary Turner #include "llvm/BinaryFormat/ELF.h"
30ac106addSNikolay Haustov #include "llvm/MC/MCContext.h"
31c8fbf6ffSEugene Zelenko #include "llvm/MC/MCDisassembler/MCDisassembler.h"
32c8fbf6ffSEugene Zelenko #include "llvm/MC/MCExpr.h"
33e1818af8STom Stellard #include "llvm/MC/MCFixedLenDisassembler.h"
34e1818af8STom Stellard #include "llvm/MC/MCInst.h"
35e1818af8STom Stellard #include "llvm/MC/MCSubtargetInfo.h"
36ac106addSNikolay Haustov #include "llvm/Support/Endian.h"
37c8fbf6ffSEugene Zelenko #include "llvm/Support/ErrorHandling.h"
38c8fbf6ffSEugene Zelenko #include "llvm/Support/MathExtras.h"
39e1818af8STom Stellard #include "llvm/Support/TargetRegistry.h"
40c8fbf6ffSEugene Zelenko #include "llvm/Support/raw_ostream.h"
41c8fbf6ffSEugene Zelenko #include <algorithm>
42c8fbf6ffSEugene Zelenko #include <cassert>
43c8fbf6ffSEugene Zelenko #include <cstddef>
44c8fbf6ffSEugene Zelenko #include <cstdint>
45c8fbf6ffSEugene Zelenko #include <iterator>
46c8fbf6ffSEugene Zelenko #include <tuple>
47c8fbf6ffSEugene Zelenko #include <vector>
48e1818af8STom Stellard 
49e1818af8STom Stellard using namespace llvm;
50e1818af8STom Stellard 
51e1818af8STom Stellard #define DEBUG_TYPE "amdgpu-disassembler"
52e1818af8STom Stellard 
53c8fbf6ffSEugene Zelenko using DecodeStatus = llvm::MCDisassembler::DecodeStatus;
54e1818af8STom Stellard 
55ac106addSNikolay Haustov inline static MCDisassembler::DecodeStatus
56ac106addSNikolay Haustov addOperand(MCInst &Inst, const MCOperand& Opnd) {
57ac106addSNikolay Haustov   Inst.addOperand(Opnd);
58ac106addSNikolay Haustov   return Opnd.isValid() ?
59ac106addSNikolay Haustov     MCDisassembler::Success :
60ac106addSNikolay Haustov     MCDisassembler::SoftFail;
61e1818af8STom Stellard }
62e1818af8STom Stellard 
63549c89d2SSam Kolton static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
64549c89d2SSam Kolton                                 uint16_t NameIdx) {
65549c89d2SSam Kolton   int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
66549c89d2SSam Kolton   if (OpIdx != -1) {
67549c89d2SSam Kolton     auto I = MI.begin();
68549c89d2SSam Kolton     std::advance(I, OpIdx);
69549c89d2SSam Kolton     MI.insert(I, Op);
70549c89d2SSam Kolton   }
71549c89d2SSam Kolton   return OpIdx;
72549c89d2SSam Kolton }
73549c89d2SSam Kolton 
// Decode the 16-bit branch target of an SOPP instruction. The immediate is a
// signed dword (4-byte) count relative to the instruction following the
// branch; a symbolic (label) operand is preferred when one can be created.
static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  // Imm counts dwords; *4 converts to bytes. 18 bits hold a 16-bit signed
  // immediate scaled by 4.
  APInt SignedOffset(18, Imm * 4, true);
  // +4 skips over the 4-byte SOPP instruction itself.
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  // No symbol available: fall back to the raw immediate.
  return addOperand(Inst, MCOperand::createImm(Imm));
}
853381d7a2SSam Kolton 
// Defines a static decoder callback named \p StaticDecoderName, suitable for
// the generated decoder tables, which forwards the raw encoding value to the
// AMDGPUDisassembler method \p DecoderName and appends the resulting operand.
#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                       unsigned Imm, \
                                       uint64_t /*Addr*/, \
                                       const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

// Shorthand: Decode<RC>RegisterClass -> AMDGPUDisassembler::decodeOperand_<RC>.
#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)
97e1818af8STom Stellard 
// VGPR and VGPR-or-scalar source operands.
DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

// VGPR tuples.
DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

// Scalar registers and scalar tuples.
DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)
115e1818af8STom Stellard 
1164bd72361SMatt Arsenault static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
1174bd72361SMatt Arsenault                                          unsigned Imm,
1184bd72361SMatt Arsenault                                          uint64_t Addr,
1194bd72361SMatt Arsenault                                          const void *Decoder) {
1204bd72361SMatt Arsenault   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
1214bd72361SMatt Arsenault   return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
1224bd72361SMatt Arsenault }
1234bd72361SMatt Arsenault 
1249be7b0d4SMatt Arsenault static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
1259be7b0d4SMatt Arsenault                                          unsigned Imm,
1269be7b0d4SMatt Arsenault                                          uint64_t Addr,
1279be7b0d4SMatt Arsenault                                          const void *Decoder) {
1289be7b0d4SMatt Arsenault   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
1299be7b0d4SMatt Arsenault   return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
1309be7b0d4SMatt Arsenault }
1319be7b0d4SMatt Arsenault 
// Shorthand for SDWA operand decoders: the static callback and the
// disassembler method share the same decodeSDWA<Name> identifier.
#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)
138363f47a2SSam Kolton 
139e1818af8STom Stellard #include "AMDGPUGenDisassemblerTables.inc"
140e1818af8STom Stellard 
141e1818af8STom Stellard //===----------------------------------------------------------------------===//
142e1818af8STom Stellard //
143e1818af8STom Stellard //===----------------------------------------------------------------------===//
144e1818af8STom Stellard 
1451048fb18SSam Kolton template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
1461048fb18SSam Kolton   assert(Bytes.size() >= sizeof(T));
1471048fb18SSam Kolton   const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
1481048fb18SSam Kolton   Bytes = Bytes.slice(sizeof(T));
149ac106addSNikolay Haustov   return Res;
150ac106addSNikolay Haustov }
151ac106addSNikolay Haustov 
// Try to decode \p Inst using a single generated decoder \p Table. On failure
// the byte stream (this->Bytes) is restored so the caller can retry with a
// different table or encoding width; MI is only modified on success.
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  // Reset the literal cache for each attempt: a failed decode may have read
  // a trailing literal via decodeLiteralConstant.
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  // Undo any bytes the failed attempt consumed (e.g. a literal).
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}
168ac106addSNikolay Haustov 
// Decode one instruction starting at Bytes_[0]. Decoder tables are tried in a
// fixed order, 64-bit DPP/SDWA encodings first, because the encoding length
// cannot be determined up front. On success Size is set to the number of
// bytes consumed; on failure it is set to 0.
DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;
  bool IsSDWA = false;

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding])
    report_fatal_error("Disassembly not yet supported for subtarget");

  // Consider at most 8 bytes per decode attempt.
  const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: better to switch encoding length using some bit predicate
    // but it is unknown yet, so try all we can

    // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
    // encodings
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) { IsSDWA = true;  break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) { IsSDWA = true;  break; }

      if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {
        Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address);
        if (Res) break;
      }
    }

    // Reinitialize Bytes as DPP64 could have eaten too much
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try decode 32-bit instruction
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address);
    if (Res) break;

    // Fall back to the 64-bit tables: append the next dword as the high half.
    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
  } while (false);

  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi)) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  // MIMG instructions may need their vdata register widened to match dmask.
  if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
    Res = convertMIMGInst(MI);
  }

  // SDWA encodings omit some operands; add them back in.
  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);

  // Bytes was advanced by eatBytes during decoding; the difference is the
  // instruction size.
  Size = Res ? (MaxInstBytesNum - Bytes.size()) : 0;
  return Res;
}
252e1818af8STom Stellard 
// Add the operands that the SDWA encodings omit so the MCInst matches its
// instruction description: on GFX9, VOPC gets a clamp operand; on VI, VOPC
// gets the implicit VCC as sdst, while VOP1/VOP2 get an omod operand.
DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    // The presence of an sdst operand identifies VOPC here.
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}
271549c89d2SSam Kolton 
// Fix up a decoded MIMG instruction whose dmask enables more than one
// channel: pick the opcode variant matching the channel count and widen the
// vdata (and, for atomics, vdst) operand to the corresponding register tuple.
DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
  int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdst);

  int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdata);

  int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::dmask);

  assert(VDataIdx != -1);
  assert(DMaskIdx != -1);

  // Only the atomic forms carry a separate vdst operand.
  bool isAtomic = (VDstIdx != -1);

  unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
  if (DMask == 0)
    return MCDisassembler::Success;

  // A single enabled channel needs no rewriting.
  unsigned ChannelCount = countPopulation(DMask);
  if (ChannelCount == 1)
    return MCDisassembler::Success;

  int NewOpcode = -1;

  if (isAtomic) {
    // Atomic variants only exist for these dmask patterns; anything else is
    // left as decoded.
    if (DMask == 0x1 || DMask == 0x3 || DMask == 0xF) {
      NewOpcode = AMDGPU::getMaskedMIMGAtomicOp(*MCII, MI.getOpcode(), ChannelCount);
    }
    if (NewOpcode == -1) return MCDisassembler::Success;
  } else {
    NewOpcode = AMDGPU::getMaskedMIMGOp(*MCII, MI.getOpcode(), ChannelCount);
    assert(NewOpcode != -1 && "could not find matching mimg channel instruction");
  }

  // Register class required for vdata by the new opcode.
  auto RCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;

  // Get first subregister of VData
  unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
  unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
  Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;

  // Widen the register to the correct number of enabled channels.
  auto NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
                                          &MRI.getRegClass(RCID));
  if (NewVdata == AMDGPU::NoRegister) {
    // It's possible to encode this such that the low register + enabled
    // components exceeds the register count.
    return MCDisassembler::Success;
  }

  MI.setOpcode(NewOpcode);
  // vaddr will be always appear as a single VGPR. This will look different than
  // how it is usually emitted because the number of register components is not
  // in the instruction encoding.
  MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);

  if (isAtomic) {
    // Atomic operations have an additional operand (a copy of data)
    MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
  }

  return MCDisassembler::Success;
}
336cad7fa85SMatt Arsenault 
337ac106addSNikolay Haustov const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
338ac106addSNikolay Haustov   return getContext().getRegisterInfo()->
339ac106addSNikolay Haustov     getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
340e1818af8STom Stellard }
341e1818af8STom Stellard 
// Report a decoding error on the comment stream and return an invalid
// MCOperand (addOperand maps an invalid operand to SoftFail).
inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}
351ac106addSNikolay Haustov 
352ac106addSNikolay Haustov inline
353ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
354ac2b0264SDmitry Preobrazhensky   return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
355ac106addSNikolay Haustov }
356ac106addSNikolay Haustov 
357ac106addSNikolay Haustov inline
358ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
359ac106addSNikolay Haustov                                                unsigned Val) const {
360ac106addSNikolay Haustov   const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
361ac106addSNikolay Haustov   if (Val >= RegCl.getNumRegs())
362ac106addSNikolay Haustov     return errOperand(Val, Twine(getRegClassName(RegClassID)) +
363ac106addSNikolay Haustov                            ": unknown register " + Twine(Val));
364ac106addSNikolay Haustov   return createRegOperand(RegCl.getRegister(Val));
365ac106addSNikolay Haustov }
366ac106addSNikolay Haustov 
// Build a scalar register operand. Wide SGPR/TTMP tuple classes encode the
// index of their first register scaled by the tuple alignment, so the raw
// value is shifted right before indexing into the register class.
inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we accepting as much as we can, let assembler sort it out
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  // The 128/256/512-bit classes deliberately fall through to share shift = 2.
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::TTMP_256RegClassID:
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::TTMP_512RegClassID:
    shift = 2;
    break;
  default:
    llvm_unreachable("unhandled register class");
  }

  // Misaligned encodings are accepted but flagged in the comment stream.
  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}
406ac106addSNikolay Haustov 
// VS_* source operands may encode a register, an inline constant or a
// literal; decodeSrcOp dispatches on the raw encoding value for each width.
MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

// 16-bit source operand.
MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

// Packed pair of 16-bit values.
MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}
4269be7b0d4SMatt Arsenault 
// Decode a plain 32-bit VGPR operand.
MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}
435ac106addSNikolay Haustov 
// VGPR tuple operands: the raw value indexes the first register of the tuple.
MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}
447ac106addSNikolay Haustov 
MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // table-gen generated disassembler doesn't care about operand types
  // leaving only registry class so SSrc_32 operand turns into SReg_32
  // and therefore we accept immediates and literals here as well
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0_XEXEC is SReg_32 without M0 or EXEC_LO/EXEC_HI; the
  // restriction is not enforced here.
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
  unsigned Val) const {
  // SReg_32_XEXEC_HI is SReg_32 without EXEC_HI; the restriction is not
  // enforced here.
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

// 256/512-bit scalar tuples are destinations only; use the dst decoder.
MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return decodeDstOp(OPW256, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return decodeDstOp(OPW512, Val);
}
486ac106addSNikolay Haustov 
// Read the 32-bit literal constant that trails the instruction. The literal
// is consumed from the byte stream only once per instruction (HasLiteral is
// reset in tryDecodeInst); later references reuse the cached value.
MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are supposed to be unsigned integer
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}
501ac106addSNikolay Haustov 
502ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
503212a251cSArtem Tamazov   using namespace AMDGPU::EncValues;
504c8fbf6ffSEugene Zelenko 
505212a251cSArtem Tamazov   assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
506212a251cSArtem Tamazov   return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
507212a251cSArtem Tamazov     (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
508212a251cSArtem Tamazov     (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
509212a251cSArtem Tamazov       // Cast prevents negative overflow.
510ac106addSNikolay Haustov }
511ac106addSNikolay Haustov 
5124bd72361SMatt Arsenault static int64_t getInlineImmVal32(unsigned Imm) {
5134bd72361SMatt Arsenault   switch (Imm) {
5144bd72361SMatt Arsenault   case 240:
5154bd72361SMatt Arsenault     return FloatToBits(0.5f);
5164bd72361SMatt Arsenault   case 241:
5174bd72361SMatt Arsenault     return FloatToBits(-0.5f);
5184bd72361SMatt Arsenault   case 242:
5194bd72361SMatt Arsenault     return FloatToBits(1.0f);
5204bd72361SMatt Arsenault   case 243:
5214bd72361SMatt Arsenault     return FloatToBits(-1.0f);
5224bd72361SMatt Arsenault   case 244:
5234bd72361SMatt Arsenault     return FloatToBits(2.0f);
5244bd72361SMatt Arsenault   case 245:
5254bd72361SMatt Arsenault     return FloatToBits(-2.0f);
5264bd72361SMatt Arsenault   case 246:
5274bd72361SMatt Arsenault     return FloatToBits(4.0f);
5284bd72361SMatt Arsenault   case 247:
5294bd72361SMatt Arsenault     return FloatToBits(-4.0f);
5304bd72361SMatt Arsenault   case 248: // 1 / (2 * PI)
5314bd72361SMatt Arsenault     return 0x3e22f983;
5324bd72361SMatt Arsenault   default:
5334bd72361SMatt Arsenault     llvm_unreachable("invalid fp inline imm");
5344bd72361SMatt Arsenault   }
5354bd72361SMatt Arsenault }
5364bd72361SMatt Arsenault 
5374bd72361SMatt Arsenault static int64_t getInlineImmVal64(unsigned Imm) {
5384bd72361SMatt Arsenault   switch (Imm) {
5394bd72361SMatt Arsenault   case 240:
5404bd72361SMatt Arsenault     return DoubleToBits(0.5);
5414bd72361SMatt Arsenault   case 241:
5424bd72361SMatt Arsenault     return DoubleToBits(-0.5);
5434bd72361SMatt Arsenault   case 242:
5444bd72361SMatt Arsenault     return DoubleToBits(1.0);
5454bd72361SMatt Arsenault   case 243:
5464bd72361SMatt Arsenault     return DoubleToBits(-1.0);
5474bd72361SMatt Arsenault   case 244:
5484bd72361SMatt Arsenault     return DoubleToBits(2.0);
5494bd72361SMatt Arsenault   case 245:
5504bd72361SMatt Arsenault     return DoubleToBits(-2.0);
5514bd72361SMatt Arsenault   case 246:
5524bd72361SMatt Arsenault     return DoubleToBits(4.0);
5534bd72361SMatt Arsenault   case 247:
5544bd72361SMatt Arsenault     return DoubleToBits(-4.0);
5554bd72361SMatt Arsenault   case 248: // 1 / (2 * PI)
5564bd72361SMatt Arsenault     return 0x3fc45f306dc9c882;
5574bd72361SMatt Arsenault   default:
5584bd72361SMatt Arsenault     llvm_unreachable("invalid fp inline imm");
5594bd72361SMatt Arsenault   }
5604bd72361SMatt Arsenault }
5614bd72361SMatt Arsenault 
5624bd72361SMatt Arsenault static int64_t getInlineImmVal16(unsigned Imm) {
5634bd72361SMatt Arsenault   switch (Imm) {
5644bd72361SMatt Arsenault   case 240:
5654bd72361SMatt Arsenault     return 0x3800;
5664bd72361SMatt Arsenault   case 241:
5674bd72361SMatt Arsenault     return 0xB800;
5684bd72361SMatt Arsenault   case 242:
5694bd72361SMatt Arsenault     return 0x3C00;
5704bd72361SMatt Arsenault   case 243:
5714bd72361SMatt Arsenault     return 0xBC00;
5724bd72361SMatt Arsenault   case 244:
5734bd72361SMatt Arsenault     return 0x4000;
5744bd72361SMatt Arsenault   case 245:
5754bd72361SMatt Arsenault     return 0xC000;
5764bd72361SMatt Arsenault   case 246:
5774bd72361SMatt Arsenault     return 0x4400;
5784bd72361SMatt Arsenault   case 247:
5794bd72361SMatt Arsenault     return 0xC400;
5804bd72361SMatt Arsenault   case 248: // 1 / (2 * PI)
5814bd72361SMatt Arsenault     return 0x3118;
5824bd72361SMatt Arsenault   default:
5834bd72361SMatt Arsenault     llvm_unreachable("invalid fp inline imm");
5844bd72361SMatt Arsenault   }
5854bd72361SMatt Arsenault }
5864bd72361SMatt Arsenault 
5874bd72361SMatt Arsenault MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
588212a251cSArtem Tamazov   assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
589212a251cSArtem Tamazov       && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);
5904bd72361SMatt Arsenault 
591e1818af8STom Stellard   // ToDo: case 248: 1/(2*PI) - is allowed only on VI
5924bd72361SMatt Arsenault   switch (Width) {
5934bd72361SMatt Arsenault   case OPW32:
5944bd72361SMatt Arsenault     return MCOperand::createImm(getInlineImmVal32(Imm));
5954bd72361SMatt Arsenault   case OPW64:
5964bd72361SMatt Arsenault     return MCOperand::createImm(getInlineImmVal64(Imm));
5974bd72361SMatt Arsenault   case OPW16:
5989be7b0d4SMatt Arsenault   case OPWV216:
5994bd72361SMatt Arsenault     return MCOperand::createImm(getInlineImmVal16(Imm));
6004bd72361SMatt Arsenault   default:
6014bd72361SMatt Arsenault     llvm_unreachable("implement me");
602e1818af8STom Stellard   }
603e1818af8STom Stellard }
604e1818af8STom Stellard 
605212a251cSArtem Tamazov unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
606e1818af8STom Stellard   using namespace AMDGPU;
607c8fbf6ffSEugene Zelenko 
608212a251cSArtem Tamazov   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
609212a251cSArtem Tamazov   switch (Width) {
610212a251cSArtem Tamazov   default: // fall
6114bd72361SMatt Arsenault   case OPW32:
6124bd72361SMatt Arsenault   case OPW16:
6139be7b0d4SMatt Arsenault   case OPWV216:
6144bd72361SMatt Arsenault     return VGPR_32RegClassID;
615212a251cSArtem Tamazov   case OPW64: return VReg_64RegClassID;
616212a251cSArtem Tamazov   case OPW128: return VReg_128RegClassID;
617212a251cSArtem Tamazov   }
618212a251cSArtem Tamazov }
619212a251cSArtem Tamazov 
620212a251cSArtem Tamazov unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
621212a251cSArtem Tamazov   using namespace AMDGPU;
622c8fbf6ffSEugene Zelenko 
623212a251cSArtem Tamazov   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
624212a251cSArtem Tamazov   switch (Width) {
625212a251cSArtem Tamazov   default: // fall
6264bd72361SMatt Arsenault   case OPW32:
6274bd72361SMatt Arsenault   case OPW16:
6289be7b0d4SMatt Arsenault   case OPWV216:
6294bd72361SMatt Arsenault     return SGPR_32RegClassID;
630212a251cSArtem Tamazov   case OPW64: return SGPR_64RegClassID;
631212a251cSArtem Tamazov   case OPW128: return SGPR_128RegClassID;
63227134953SDmitry Preobrazhensky   case OPW256: return SGPR_256RegClassID;
63327134953SDmitry Preobrazhensky   case OPW512: return SGPR_512RegClassID;
634212a251cSArtem Tamazov   }
635212a251cSArtem Tamazov }
636212a251cSArtem Tamazov 
637212a251cSArtem Tamazov unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
638212a251cSArtem Tamazov   using namespace AMDGPU;
639c8fbf6ffSEugene Zelenko 
640212a251cSArtem Tamazov   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
641212a251cSArtem Tamazov   switch (Width) {
642212a251cSArtem Tamazov   default: // fall
6434bd72361SMatt Arsenault   case OPW32:
6444bd72361SMatt Arsenault   case OPW16:
6459be7b0d4SMatt Arsenault   case OPWV216:
6464bd72361SMatt Arsenault     return TTMP_32RegClassID;
647212a251cSArtem Tamazov   case OPW64: return TTMP_64RegClassID;
648212a251cSArtem Tamazov   case OPW128: return TTMP_128RegClassID;
64927134953SDmitry Preobrazhensky   case OPW256: return TTMP_256RegClassID;
65027134953SDmitry Preobrazhensky   case OPW512: return TTMP_512RegClassID;
651212a251cSArtem Tamazov   }
652212a251cSArtem Tamazov }
653212a251cSArtem Tamazov 
654ac2b0264SDmitry Preobrazhensky int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
655ac2b0264SDmitry Preobrazhensky   using namespace AMDGPU::EncValues;
656ac2b0264SDmitry Preobrazhensky 
657ac2b0264SDmitry Preobrazhensky   unsigned TTmpMin = isGFX9() ? TTMP_GFX9_MIN : TTMP_VI_MIN;
658ac2b0264SDmitry Preobrazhensky   unsigned TTmpMax = isGFX9() ? TTMP_GFX9_MAX : TTMP_VI_MAX;
659ac2b0264SDmitry Preobrazhensky 
660ac2b0264SDmitry Preobrazhensky   return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
661ac2b0264SDmitry Preobrazhensky }
662ac2b0264SDmitry Preobrazhensky 
// Decode a 9-bit scalar/vector source operand encoding into the matching
// register, inline-constant, literal, or special-register operand. The
// range checks below are mutually exclusive, tested in encoding order:
// VGPRs, SGPRs, ttmps, inline integers, inline floats, the literal marker,
// and finally width-dependent special registers.
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 512); // enum9

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  // Trap temporaries; range depends on the subtarget (VI vs GFX9).
  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  // LITERAL_CONST signals that the constant follows the instruction word.
  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}
701ac106addSNikolay Haustov 
70227134953SDmitry Preobrazhensky MCOperand AMDGPUDisassembler::decodeDstOp(const OpWidthTy Width, unsigned Val) const {
70327134953SDmitry Preobrazhensky   using namespace AMDGPU::EncValues;
70427134953SDmitry Preobrazhensky 
70527134953SDmitry Preobrazhensky   assert(Val < 128);
70627134953SDmitry Preobrazhensky   assert(Width == OPW256 || Width == OPW512);
70727134953SDmitry Preobrazhensky 
70827134953SDmitry Preobrazhensky   if (Val <= SGPR_MAX) {
70927134953SDmitry Preobrazhensky     assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
71027134953SDmitry Preobrazhensky     return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
71127134953SDmitry Preobrazhensky   }
71227134953SDmitry Preobrazhensky 
71327134953SDmitry Preobrazhensky   int TTmpIdx = getTTmpIdx(Val);
71427134953SDmitry Preobrazhensky   if (TTmpIdx >= 0) {
71527134953SDmitry Preobrazhensky     return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
71627134953SDmitry Preobrazhensky   }
71727134953SDmitry Preobrazhensky 
71827134953SDmitry Preobrazhensky   llvm_unreachable("unknown dst register");
71927134953SDmitry Preobrazhensky }
72027134953SDmitry Preobrazhensky 
// Decode a 32-bit special (non-SGPR/VGPR/ttmp) scalar operand encoding into
// a register operand, or an error operand for unknown encodings.
MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR_LO);
  case 103: return createRegOperand(FLAT_SCR_HI);
  case 104: return createRegOperand(XNACK_MASK_LO);
  case 105: return createRegOperand(XNACK_MASK_HI);
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  // TBA/TMA are rejected on GFX9 (these encodings belong to the GFX9 ttmp
  // range handled by getTTmpIdx).
  case 108: assert(!isGFX9()); return createRegOperand(TBA_LO);
  case 109: assert(!isGFX9()); return createRegOperand(TBA_HI);
  case 110: assert(!isGFX9()); return createRegOperand(TMA_LO);
  case 111: assert(!isGFX9()); return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
    // TODO: SRC_POPS_EXITING_WAVE_ID
    // ToDo: no support for vccz register
  case 251: break;
    // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}
752e1818af8STom Stellard 
// Decode a 64-bit special scalar operand encoding into the corresponding
// register-pair operand, or an error operand for unknown encodings.
MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR);
  case 104: return createRegOperand(XNACK_MASK);
  case 106: return createRegOperand(VCC);
  // TBA/TMA are rejected on GFX9 (see decodeSpecialReg32 / getTTmpIdx).
  case 108: assert(!isGFX9()); return createRegOperand(TBA);
  case 110: assert(!isGFX9()); return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}
767161a158eSNikolay Haustov 
// Decode an SDWA source operand. On GFX9 the encoding can name a VGPR, an
// SGPR, a ttmp, an inline constant or a special register; on VI it can only
// name a VGPR.
MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
                                            const unsigned Val) const {
  using namespace AMDGPU::SDWA;
  using namespace AMDGPU::EncValues;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    // XXX: static_cast<int> is needed to avoid stupid warning:
    // compare with unsigned is always true
    if (SDWA9EncValues::SRC_VGPR_MIN <= static_cast<int>(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
      return createRegOperand(getVgprClassId(Width),
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    }
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_SGPR_MAX) {
      return createSRegOperand(getSgprClassId(Width),
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    }
    if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_TTMP_MAX) {
      return createSRegOperand(getTtmpClassId(Width),
                               Val - SDWA9EncValues::SRC_TTMP_MIN);
    }

    // Remaining encodings are re-based by SRC_SGPR_MIN before being decoded
    // as inline constants or special registers.
    // NOTE(review): this assumes every remaining valid encoding satisfies
    // Val >= SRC_SGPR_MIN (otherwise SVal wraps) — confirm against the
    // SDWA9 encoding tables.
    const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;

    if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
      return decodeIntImmed(SVal);

    if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
      return decodeFPImmed(Width, SVal);

    return decodeSpecialReg32(SVal);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    // VI SDWA sources are plain VGPR numbers.
    return createRegOperand(getVgprClassId(Width), Val);
  }
  llvm_unreachable("unsupported target");
}
806363f47a2SSam Kolton 
// Decode a 16-bit SDWA source operand (thin wrapper over decodeSDWASrc).
MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}
810363f47a2SSam Kolton 
// Decode a 32-bit SDWA source operand (thin wrapper over decodeSDWASrc).
MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}
814363f47a2SSam Kolton 
815549c89d2SSam Kolton MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
816363f47a2SSam Kolton   using namespace AMDGPU::SDWA;
817363f47a2SSam Kolton 
818549c89d2SSam Kolton   assert(STI.getFeatureBits()[AMDGPU::FeatureGFX9] &&
819549c89d2SSam Kolton          "SDWAVopcDst should be present only on GFX9");
820363f47a2SSam Kolton   if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
821363f47a2SSam Kolton     Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
822ac2b0264SDmitry Preobrazhensky 
823ac2b0264SDmitry Preobrazhensky     int TTmpIdx = getTTmpIdx(Val);
824ac2b0264SDmitry Preobrazhensky     if (TTmpIdx >= 0) {
825ac2b0264SDmitry Preobrazhensky       return createSRegOperand(getTtmpClassId(OPW64), TTmpIdx);
826ac2b0264SDmitry Preobrazhensky     } else if (Val > AMDGPU::EncValues::SGPR_MAX) {
827363f47a2SSam Kolton       return decodeSpecialReg64(Val);
828363f47a2SSam Kolton     } else {
829363f47a2SSam Kolton       return createSRegOperand(getSgprClassId(OPW64), Val);
830363f47a2SSam Kolton     }
831363f47a2SSam Kolton   } else {
832363f47a2SSam Kolton     return createRegOperand(AMDGPU::VCC);
833363f47a2SSam Kolton   }
834363f47a2SSam Kolton }
835363f47a2SSam Kolton 
// Subtarget predicate: true when targeting Volcanic Islands.
bool AMDGPUDisassembler::isVI() const {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}
839ac2b0264SDmitry Preobrazhensky 
// Subtarget predicate: true when targeting GFX9.
bool AMDGPUDisassembler::isGFX9() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}
843ac2b0264SDmitry Preobrazhensky 
8443381d7a2SSam Kolton //===----------------------------------------------------------------------===//
8453381d7a2SSam Kolton // AMDGPUSymbolizer
8463381d7a2SSam Kolton //===----------------------------------------------------------------------===//
8473381d7a2SSam Kolton 
8483381d7a2SSam Kolton // Try to find symbol name for specified label
8493381d7a2SSam Kolton bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
8503381d7a2SSam Kolton                                 raw_ostream &/*cStream*/, int64_t Value,
8513381d7a2SSam Kolton                                 uint64_t /*Address*/, bool IsBranch,
8523381d7a2SSam Kolton                                 uint64_t /*Offset*/, uint64_t /*InstSize*/) {
853c8fbf6ffSEugene Zelenko   using SymbolInfoTy = std::tuple<uint64_t, StringRef, uint8_t>;
854c8fbf6ffSEugene Zelenko   using SectionSymbolsTy = std::vector<SymbolInfoTy>;
8553381d7a2SSam Kolton 
8563381d7a2SSam Kolton   if (!IsBranch) {
8573381d7a2SSam Kolton     return false;
8583381d7a2SSam Kolton   }
8593381d7a2SSam Kolton 
8603381d7a2SSam Kolton   auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
8613381d7a2SSam Kolton   auto Result = std::find_if(Symbols->begin(), Symbols->end(),
8623381d7a2SSam Kolton                              [Value](const SymbolInfoTy& Val) {
8633381d7a2SSam Kolton                                 return std::get<0>(Val) == static_cast<uint64_t>(Value)
8643381d7a2SSam Kolton                                     && std::get<2>(Val) == ELF::STT_NOTYPE;
8653381d7a2SSam Kolton                              });
8663381d7a2SSam Kolton   if (Result != Symbols->end()) {
8673381d7a2SSam Kolton     auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
8683381d7a2SSam Kolton     const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
8693381d7a2SSam Kolton     Inst.addOperand(MCOperand::createExpr(Add));
8703381d7a2SSam Kolton     return true;
8713381d7a2SSam Kolton   }
8723381d7a2SSam Kolton   return false;
8733381d7a2SSam Kolton }
8743381d7a2SSam Kolton 
// MCSymbolizer hook for commenting PC-relative load references; not
// implemented for AMDGPU.
void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}
88092b355b1SMatt Arsenault 
8813381d7a2SSam Kolton //===----------------------------------------------------------------------===//
8823381d7a2SSam Kolton // Initialization
8833381d7a2SSam Kolton //===----------------------------------------------------------------------===//
8843381d7a2SSam Kolton 
// Factory registered with the TargetRegistry. DisInfo is forwarded opaquely
// to the symbolizer, which later interprets it as a section symbol table
// (see AMDGPUSymbolizer::tryAddingSymbolicOperand).
static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}
8933381d7a2SSam Kolton 
// Factory registered with the TargetRegistry; constructs the disassembler
// with a freshly created MCInstrInfo for the target.
static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
}
899e1818af8STom Stellard 
900e1818af8STom Stellard extern "C" void LLVMInitializeAMDGPUDisassembler() {
901f42454b9SMehdi Amini   TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
902f42454b9SMehdi Amini                                          createAMDGPUDisassembler);
903f42454b9SMehdi Amini   TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
904f42454b9SMehdi Amini                                        createAMDGPUSymbolizer);
905e1818af8STom Stellard }
906