//===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "Disassembler/AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm-c/Disassembler.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

using DecodeStatus = llvm::MCDisassembler::DecodeStatus;

inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                uint16_t NameIdx) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
  if (OpIdx != -1) {
    auto I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}

static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  // Our branches take a simm16, but we need two extra bits to account for the
  // factor of 4.
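  // For example, an encoded value of 0xFFFF (simm16 == -1) becomes a byte
  // offset of -4; added to Addr + 4 this yields Addr, i.e. the branch targets
  // itself.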
  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                       unsigned Imm, \
                                       uint64_t /*Addr*/, \
                                       const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)
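// For example, DECODE_OPERAND_REG(VGPR_32) defines DecodeVGPR_32RegisterClass,
// which simply forwards the encoded value to
// AMDGPUDisassembler::decodeOperand_VGPR_32.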

DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VRegOrLds_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
DECODE_OPERAND_REG(SRegOrLds_32)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)

static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res =
      support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;
  bool IsSDWA = false;

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding])
    report_fatal_error("Disassembly not yet supported for subtarget");

  const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: it would be better to switch the encoding length on some bit
    // predicate, but such a predicate is not known yet, so try everything we
    // can.

    // Try to decode DPP and SDWA first to resolve the conflict with the VOP1
    // and VOP2 encodings.
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {
        Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address);
        if (Res)
          break;
      }

      // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
      // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
      // table first so we print the correct name.
      if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) {
        Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address);
        if (Res)
          break;
      }
    }

    // Reinitialize Bytes as DPP64 could have eaten too much
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try to decode a 32-bit instruction
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address);
    if (Res) break;

    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
  } while (false);

  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ||
              MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_vi)) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
    Res = convertMIMGInst(MI);
  }

  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);

  // If the opcode was not recognized, we'll assume a Size of 4 bytes (unless
  // there are fewer bytes left).
  Size = Res ? (MaxInstBytesNum - Bytes.size())
             : std::min((size_t)4, Bytes_.size());
  return Res;
}

DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}

// Note that the MIMG format provides no information about the VADDR size.
// Consequently, decoded instructions always show the address as if it were a
// single dword, which may not actually be the case.
DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {

  int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdst);

  int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdata);

  int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::dmask);

  int TFEIdx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::tfe);
  int D16Idx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::d16);

  assert(VDataIdx != -1);
  assert(DMaskIdx != -1);
  assert(TFEIdx != -1);

  bool IsAtomic = (VDstIdx != -1);
  bool IsGather4 = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::Gather4;

  unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
  if (DMask == 0)
    return MCDisassembler::Success;

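  // The number of enabled dmask channels determines the destination size; for
  // example, a dmask of 0b1011 enables three channels and thus needs a
  // three-dword result (gather4 instructions always return four dwords).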
  unsigned DstSize = IsGather4 ? 4 : countPopulation(DMask);
  if (DstSize == 1)
    return MCDisassembler::Success;

  bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
  if (D16 && AMDGPU::hasPackedD16(STI)) {
    DstSize = (DstSize + 1) / 2;
  }

  // FIXME: Add tfe support
  if (MI.getOperand(TFEIdx).getImm())
    return MCDisassembler::Success;

  int NewOpcode = -1;

  if (IsGather4) {
    if (D16 && AMDGPU::hasPackedD16(STI))
      NewOpcode = AMDGPU::getMaskedMIMGOp(MI.getOpcode(), 2);
    else
      return MCDisassembler::Success;
  } else {
    NewOpcode = AMDGPU::getMaskedMIMGOp(MI.getOpcode(), DstSize);
    if (NewOpcode == -1)
      return MCDisassembler::Success;
  }

  auto RCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;

  // Get first subregister of VData
  unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
  unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
  Vdata0 = (VdataSub0 != 0) ? VdataSub0 : Vdata0;

  // Widen the register to the correct number of enabled channels.
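  // For example, if the encoding named v4 and three channels are enabled, the
  // widened destination is expected to be the 96-bit tuple starting at v4
  // (v[4:6]).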
  auto NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
                                          &MRI.getRegClass(RCID));
  if (NewVdata == AMDGPU::NoRegister) {
    // It's possible to encode this such that the low register + enabled
    // components exceeds the register count.
    return MCDisassembler::Success;
  }

  MI.setOpcode(NewOpcode);
  // vaddr will always appear as a single VGPR. This will look different from
  // how it is usually emitted because the number of register components is not
  // in the instruction encoding.
  MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);

  if (IsAtomic) {
    // Atomic operations have an additional operand (a copy of data)
    MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
  }

  return MCDisassembler::Success;
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI has 102.
  // Valery: here we accept as much as we can, let the assembler sort it out.
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::TTMP_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::TTMP_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

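  // For example, a 64-bit scalar operand encoded as Val == 4 is aligned
  // (shift == 1, 4 % 2 == 0) and selects register 2 of the 64-bit class, i.e.
  // the SGPR pair starting at s4.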
  return createRegOperand(SRegClassID, Val >> shift);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VRegOrLds_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // The table-gen'd disassembler doesn't care about operand types, leaving
  // only the register class, so an SSrc_32 operand turns into SReg_32, and
  // therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0_XEXEC is SReg_32 without M0 or EXEC_LO/EXEC_HI.
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
  unsigned Val) const {
  // SReg_32_XEXEC_HI is SReg_32 without EXEC_HI.
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SRegOrLds_32(unsigned Val) const {
  // The table-gen'd disassembler doesn't care about operand types, leaving
  // only the register class, so an SSrc_32 operand turns into SReg_32, and
  // therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return decodeDstOp(OPW256, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return decodeDstOp(OPW512, Val);
}

MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are assumed to be unsigned integers.
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}

MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;

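  // The encoded value maps linearly onto the inline integer range; for
  // example, (INLINE_INTEGER_C_MIN + 1) decodes to +1 and
  // (INLINE_INTEGER_C_POSITIVE_MAX + 1) decodes to -1.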
  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
      // Cast prevents negative overflow.
}

static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
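  // For example, Imm == 242 yields the 1.0 bit pattern for the requested
  // width: 0x3f800000 (OPW32), 0x3ff0000000000000 (OPW64) or 0x3C00
  // (OPW16/OPWV216).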
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  case OPW256: return SGPR_256RegClassID;
  case OPW512: return SGPR_512RegClassID;
  }
}

unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  case OPW256: return TTMP_256RegClassID;
  case OPW512: return TTMP_512RegClassID;
  }
}

int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
  using namespace AMDGPU::EncValues;

  unsigned TTmpMin = isGFX9() ? TTMP_GFX9_MIN : TTMP_VI_MIN;
  unsigned TTmpMax = isGFX9() ? TTMP_GFX9_MAX : TTMP_VI_MAX;

  return (TTmpMin <= Val && Val <= TTmpMax) ? Val - TTmpMin : -1;
}

MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width,
                                          unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 512); // enum9

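  // The 9-bit source encoding is resolved range by range below: VGPRs and
  // SGPRs/TTMPs first, then inline integer and floating-point constants, the
  // literal-constant marker, and finally the remaining special registers.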
  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

MCOperand AMDGPUDisassembler::decodeDstOp(const OpWidthTy Width,
                                          unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 128);
  assert(Width == OPW256 || Width == OPW512);

  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  llvm_unreachable("unknown dst register");
}

MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR_LO);
  case 103: return createRegOperand(FLAT_SCR_HI);
  case 104: return createRegOperand(XNACK_MASK_LO);
  case 105: return createRegOperand(XNACK_MASK_HI);
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: assert(!isGFX9()); return createRegOperand(TBA_LO);
  case 109: assert(!isGFX9()); return createRegOperand(TBA_HI);
  case 110: assert(!isGFX9()); return createRegOperand(TMA_LO);
  case 111: assert(!isGFX9()); return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
    // TODO: SRC_POPS_EXITING_WAVE_ID
    // ToDo: no support for vccz register
  case 251: break;
    // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  case 254: return createRegOperand(LDS_DIRECT);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR);
  case 104: return createRegOperand(XNACK_MASK);
  case 106: return createRegOperand(VCC);
  case 108: assert(!isGFX9()); return createRegOperand(TBA);
  case 110: assert(!isGFX9()); return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
                                            const unsigned Val) const {
  using namespace AMDGPU::SDWA;
  using namespace AMDGPU::EncValues;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    // XXX: static_cast<int> is needed to avoid a "comparison with unsigned is
    // always true" warning.
    if (SDWA9EncValues::SRC_VGPR_MIN <= static_cast<int>(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
      return createRegOperand(getVgprClassId(Width),
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    }
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_SGPR_MAX) {
      return createSRegOperand(getSgprClassId(Width),
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    }
    if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_TTMP_MAX) {
      return createSRegOperand(getTtmpClassId(Width),
                               Val - SDWA9EncValues::SRC_TTMP_MIN);
    }

    const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;

    if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
      return decodeIntImmed(SVal);

    if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
      return decodeFPImmed(Width, SVal);

    return decodeSpecialReg32(SVal);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    return createRegOperand(getVgprClassId(Width), Val);
  }
  llvm_unreachable("unsupported target");
}

MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
  using namespace AMDGPU::SDWA;

  assert(STI.getFeatureBits()[AMDGPU::FeatureGFX9] &&
         "SDWAVopcDst should be present only on GFX9");
  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;

    int TTmpIdx = getTTmpIdx(Val);
    if (TTmpIdx >= 0) {
      return createSRegOperand(getTtmpClassId(OPW64), TTmpIdx);
    } else if (Val > AMDGPU::EncValues::SGPR_MAX) {
      return decodeSpecialReg64(Val);
    } else {
      return createSRegOperand(getSgprClassId(OPW64), Val);
    }
  } else {
    return createRegOperand(AMDGPU::VCC);
  }
}

bool AMDGPUDisassembler::isVI() const {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool AMDGPUDisassembler::isGFX9() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find symbol name for specified label
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  using SymbolInfoTy = std::tuple<uint64_t, StringRef, uint8_t>;
  using SectionSymbolsTy = std::vector<SymbolInfoTy>;

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  if (!Symbols)
    return false;

  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                                return std::get<0>(Val) == static_cast<uint64_t>(Value)
                                    && std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
}

extern "C" void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}