//===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "Disassembler/AMDGPUDisassembler.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm-c/DisassemblerTypes.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/Support/AMDHSAKernelDescriptor.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

#define SGPR_MAX                                                               \
  (isGFX10Plus() ? AMDGPU::EncValues::SGPR_MAX_GFX10                           \
                 : AMDGPU::EncValues::SGPR_MAX_SI)

using DecodeStatus = llvm::MCDisassembler::DecodeStatus;

AMDGPUDisassembler::AMDGPUDisassembler(const MCSubtargetInfo &STI,
                                       MCContext &Ctx,
                                       MCInstrInfo const *MCII) :
  MCDisassembler(STI, Ctx), MCII(MCII), MRI(*Ctx.getRegisterInfo()),
  TargetMaxInstBytes(Ctx.getAsmInfo()->getMaxInstLength(&STI)) {

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding] && !isGFX10Plus())
    report_fatal_error("Disassembly not yet supported for subtarget");
}

inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::Fail;
}

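// Insert Op at the position of the operand named NameIdx in MI, returning
// that operand's index, or -1 if the opcode has no such named operand.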
static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                uint16_t NameIdx) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
  if (OpIdx != -1) {
    auto I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}

static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  // Our branches take a simm16, but we need two extra bits to account for the
  // factor of 4.
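  // For example, a raw Imm of 0xFFFF is simm16 -1, giving a target of
  // Addr + 4 + (-1 * 4) == Addr, i.e. a branch to itself.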
  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

static DecodeStatus decodeSMEMOffset(MCInst &Inst, unsigned Imm,
                                     uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  int64_t Offset;
  if (DAsm->isVI()) { // VI supports 20-bit unsigned offsets.
    Offset = Imm & 0xFFFFF;
  } else { // GFX9+ supports 21-bit signed offsets.
    Offset = SignExtend64<21>(Imm);
  }
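  // E.g. on GFX9+ the 21-bit value 0x1FFFFF decodes to -1, whereas VI treats
  // offsets as plain 20-bit unsigned values.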
  return addOperand(Inst, MCOperand::createImm(Offset));
}

static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val,
                                  uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeBoolReg(Val));
}

#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                      unsigned Imm, \
                                      uint64_t /*Addr*/, \
                                      const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)

DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VRegOrLds_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)
DECODE_OPERAND_REG(VReg_256)
DECODE_OPERAND_REG(VReg_512)
DECODE_OPERAND_REG(VReg_1024)

DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
DECODE_OPERAND_REG(SRegOrLds_32)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)

DECODE_OPERAND_REG(AGPR_32)
DECODE_OPERAND_REG(AReg_64)
DECODE_OPERAND_REG(AReg_128)
DECODE_OPERAND_REG(AReg_256)
DECODE_OPERAND_REG(AReg_512)
DECODE_OPERAND_REG(AReg_1024)
DECODE_OPERAND_REG(AV_32)
DECODE_OPERAND_REG(AV_64)

static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

static DecodeStatus decodeOperand_VSrcV232(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV232(Imm));
}

static DecodeStatus decodeOperand_VS_16(MCInst &Inst,
                                        unsigned Imm,
                                        uint64_t Addr,
                                        const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VS_32(MCInst &Inst,
                                        unsigned Imm,
                                        uint64_t Addr,
                                        const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VS_32(Imm));
}

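// In the 10-bit source-operand encoding, bit 9 (value 512) selects the AGPR
// register file (see decodeSrcOp), so the AReg decoders below set it on the
// raw encoding before decoding.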
static DecodeStatus decodeOperand_AReg_64(MCInst &Inst,
                                          unsigned Imm,
                                          uint64_t Addr,
                                          const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW64, Imm | 512));
}

static DecodeStatus decodeOperand_AReg_128(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW128, Imm | 512));
}

static DecodeStatus decodeOperand_AReg_256(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW256, Imm | 512));
}

static DecodeStatus decodeOperand_AReg_512(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW512, Imm | 512));
}

static DecodeStatus decodeOperand_AReg_1024(MCInst &Inst,
                                            unsigned Imm,
                                            uint64_t Addr,
                                            const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW1024, Imm | 512));
}

static DecodeStatus decodeOperand_VReg_64(MCInst &Inst,
                                          unsigned Imm,
                                          uint64_t Addr,
                                          const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW64, Imm));
}

static DecodeStatus decodeOperand_VReg_128(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW128, Imm));
}

static DecodeStatus decodeOperand_VReg_256(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW256, Imm));
}

static DecodeStatus decodeOperand_VReg_512(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW512, Imm));
}

static DecodeStatus decodeOperand_VReg_1024(MCInst &Inst,
                                            unsigned Imm,
                                            uint64_t Addr,
                                            const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW1024, Imm));
}

static bool IsAGPROperand(const MCInst &Inst, int OpIdx,
                          const MCRegisterInfo *MRI) {
  if (OpIdx < 0)
    return false;

  const MCOperand &Op = Inst.getOperand(OpIdx);
  if (!Op.isReg())
    return false;

  unsigned Sub = MRI->getSubReg(Op.getReg(), AMDGPU::sub0);
  auto Reg = Sub ? Sub : Op.getReg();
  return Reg >= AMDGPU::AGPR0 && Reg <= AMDGPU::AGPR255;
}

static DecodeStatus decodeOperand_AVLdSt_Any(MCInst &Inst,
                                             unsigned Imm,
                                             AMDGPUDisassembler::OpWidthTy Opw,
                                             const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  if (!DAsm->isGFX90A()) {
    Imm &= 511;
  } else {
    // If an atomic has both vdata and vdst, their register classes are tied.
    // The AGPR bit is decoded along with the vdst, the first operand, so we
    // need to switch the register class to AGPR if vdst was an AGPR.
    // If a DS instruction has both data0 and data1, their register classes
    // are also tied.
    unsigned Opc = Inst.getOpcode();
    uint64_t TSFlags = DAsm->getMCII()->get(Opc).TSFlags;
    uint16_t DataNameIdx = (TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0
                                                        : AMDGPU::OpName::vdata;
    const MCRegisterInfo *MRI = DAsm->getContext().getRegisterInfo();
    int DataIdx = AMDGPU::getNamedOperandIdx(Opc, DataNameIdx);
    if ((int)Inst.getNumOperands() == DataIdx) {
      int DstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (IsAGPROperand(Inst, DstIdx, MRI))
        Imm |= 512;
    }

    if (TSFlags & SIInstrFlags::DS) {
      int Data2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
      if ((int)Inst.getNumOperands() == Data2Idx &&
          IsAGPROperand(Inst, DataIdx, MRI))
        Imm |= 512;
    }
  }
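  // VGPR and AGPR indices occupy the 256..511 range of the source-operand
  // encoding, so shift the raw register index into that range before
  // decoding.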
  return addOperand(Inst, DAsm->decodeSrcOp(Opw, Imm | 256));
}

static DecodeStatus DecodeAVLdSt_32RegisterClass(MCInst &Inst,
                                                 unsigned Imm,
                                                 uint64_t Addr,
                                                 const void *Decoder) {
  return decodeOperand_AVLdSt_Any(Inst, Imm,
                                  AMDGPUDisassembler::OPW32, Decoder);
}

static DecodeStatus DecodeAVLdSt_64RegisterClass(MCInst &Inst,
                                                 unsigned Imm,
                                                 uint64_t Addr,
                                                 const void *Decoder) {
  return decodeOperand_AVLdSt_Any(Inst, Imm,
                                  AMDGPUDisassembler::OPW64, Decoder);
}

static DecodeStatus DecodeAVLdSt_96RegisterClass(MCInst &Inst,
                                                 unsigned Imm,
                                                 uint64_t Addr,
                                                 const void *Decoder) {
  return decodeOperand_AVLdSt_Any(Inst, Imm,
                                  AMDGPUDisassembler::OPW96, Decoder);
}

static DecodeStatus DecodeAVLdSt_128RegisterClass(MCInst &Inst,
                                                  unsigned Imm,
                                                  uint64_t Addr,
                                                  const void *Decoder) {
  return decodeOperand_AVLdSt_Any(Inst, Imm,
                                  AMDGPUDisassembler::OPW128, Decoder);
}

static DecodeStatus decodeOperand_SReg_32(MCInst &Inst,
                                          unsigned Imm,
                                          uint64_t Addr,
                                          const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_SReg_32(Imm));
}

static DecodeStatus decodeOperand_VGPR_32(MCInst &Inst,
                                          unsigned Imm,
                                          uint64_t Addr,
                                          const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW32, Imm));
}

#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

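// Consume sizeof(T) bytes from the front of Bytes and return them as a
// little-endian value of type T.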
template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

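// Attempt to decode one instruction word against a single generated decoder
// table. On failure the Bytes stream is restored, so the caller may retry
// with another table or encoding width.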
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

// The disassembler is greedy, so we need to check the FI operand value to
// avoid parsing a dpp8 instruction when the required literal is not set. For
// dpp16, the autogenerated decoder checks the dpp literal.
static bool isValidDPP8(const MCInst &MI) {
  using namespace llvm::AMDGPU::DPP;
  int FiIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::fi);
  assert(FiIdx != -1);
  if ((unsigned)FiIdx >= MI.getNumOperands())
    return false;
  unsigned Fi = MI.getOperand(FiIdx).getImm();
  return Fi == DPP8_FI_0 || Fi == DPP8_FI_1;
}

DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &CS) const {
  CommentStream = &CS;
  bool IsSDWA = false;

  unsigned MaxInstBytesNum = std::min((size_t)TargetMaxInstBytes, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: better to switch encoding length using some bit predicate
    // but it is unknown yet, so try all we can

    // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
    // encodings
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);

      if (STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding]) {
        Res = tryDecodeInst(DecoderTableGFX10_B64, MI, QW, Address);
        if (Res) {
          if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dpp8)
              == -1)
            break;
          if (convertDPP8Inst(MI) == MCDisassembler::Success)
            break;
          MI = MCInst(); // clear
        }
      }

      Res = tryDecodeInst(DecoderTableDPP864, MI, QW, Address);
      if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
        break;

      MI = MCInst(); // clear

      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA1064, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {
        Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address);
        if (Res)
          break;
      }

      // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
      // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
      // table first so we print the correct name.
      if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) {
        Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address);
        if (Res)
          break;
      }
    }

    // Reinitialize Bytes as DPP64 could have eaten too much
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try decode 32-bit instruction
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableGFX832, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address);
    if (Res) break;

    if (STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts]) {
      Res = tryDecodeInst(DecoderTableGFX90A32, MI, DW, Address);
      if (Res)
        break;
    }

    if (STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding]) {
      Res = tryDecodeInst(DecoderTableGFX10_B32, MI, DW, Address);
      if (Res) break;
    }

    Res = tryDecodeInst(DecoderTableGFX1032, MI, DW, Address);
    if (Res) break;

    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;

    if (STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts]) {
      Res = tryDecodeInst(DecoderTableGFX90A64, MI, QW, Address);
      if (Res)
        break;
    }

    Res = tryDecodeInst(DecoderTableGFX864, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX1064, MI, QW, Address);
  } while (false);

  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_MAC_LEGACY_F32_e64_gfx6_gfx7 ||
              MI.getOpcode() == AMDGPU::V_MAC_LEGACY_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ||
              MI.getOpcode() == AMDGPU::V_FMAC_F64_e64_gfx90a ||
              MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_FMAC_LEGACY_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_FMAC_F16_e64_gfx10)) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags &
              (SIInstrFlags::MUBUF | SIInstrFlags::FLAT | SIInstrFlags::SMRD))) {
    int CPolPos = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                             AMDGPU::OpName::cpol);
    if (CPolPos != -1) {
      unsigned CPol =
          (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::IsAtomicRet) ?
              AMDGPU::CPol::GLC : 0;
      if (MI.getNumOperands() <= (unsigned)CPolPos) {
        insertNamedMCOperand(MI, MCOperand::createImm(CPol),
                             AMDGPU::OpName::cpol);
      } else if (CPol) {
        MI.getOperand(CPolPos).setImm(MI.getOperand(CPolPos).getImm() | CPol);
      }
    }
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags &
              (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) &&
             (STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts])) {
    // GFX90A lost TFE, its place is occupied by ACC.
    int TFEOpIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
    if (TFEOpIdx != -1) {
      auto TFEIter = MI.begin();
      std::advance(TFEIter, TFEOpIdx);
      MI.insert(TFEIter, MCOperand::createImm(0));
    }
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags &
              (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF))) {
    int SWZOpIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::swz);
    if (SWZOpIdx != -1) {
      auto SWZIter = MI.begin();
      std::advance(SWZIter, SWZOpIdx);
      MI.insert(SWZIter, MCOperand::createImm(0));
    }
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
    int VAddr0Idx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
    int RsrcIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
    unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1;
    if (VAddr0Idx >= 0 && NSAArgs > 0) {
      unsigned NSAWords = (NSAArgs + 3) / 4;
      if (Bytes.size() < 4 * NSAWords) {
        Res = MCDisassembler::Fail;
      } else {
        for (unsigned i = 0; i < NSAArgs; ++i) {
          MI.insert(MI.begin() + VAddr0Idx + 1 + i,
                    decodeOperand_VGPR_32(Bytes[i]));
        }
        Bytes = Bytes.slice(4 * NSAWords);
      }
    }

    if (Res)
      Res = convertMIMGInst(MI);
  }

  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);

  int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                              AMDGPU::OpName::vdst_in);
  if (VDstIn_Idx != -1) {
    int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
                             MCOI::OperandConstraint::TIED_TO);
    if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
                       !MI.getOperand(VDstIn_Idx).isReg() ||
                       MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) {
      if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
        MI.erase(&MI.getOperand(VDstIn_Idx));
      insertNamedMCOperand(MI,
                           MCOperand::createReg(MI.getOperand(Tied).getReg()),
                           AMDGPU::OpName::vdst_in);
    }
  }

  // if the opcode was not recognized we'll assume a Size of 4 bytes
  // (unless there are fewer bytes left)
  Size = Res ? (MaxInstBytesNum - Bytes.size())
             : std::min((size_t)4, Bytes_.size());
  return Res;
}

DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
      STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}

// We must check that FI matches the literal to reject instructions that are
// not genuine dpp8, and we must add the optional MI operands first so that FI
// can be checked.
DecodeStatus AMDGPUDisassembler::convertDPP8Inst(MCInst &MI) const {
  unsigned Opc = MI.getOpcode();
  unsigned DescNumOps = MCII->get(Opc).getNumOperands();

  // Insert dummy unused src modifiers.
  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1)
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src0_modifiers);

  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers) != -1)
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src1_modifiers);

  return isValidDPP8(MI) ? MCDisassembler::Success : MCDisassembler::SoftFail;
}

// Note that before gfx10, the MIMG encoding provided no information about
// VADDR size. Consequently, decoded instructions always show the address as
// if it has 1 dword, which may not actually be the case.
DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {

  int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdst);

  int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdata);
  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::dmask);

  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                          AMDGPU::OpName::tfe);
  int D16Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                          AMDGPU::OpName::d16);

  assert(VDataIdx != -1);
  if (DMaskIdx == -1 || TFEIdx == -1) { // intersect_ray
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::a16) > -1) {
      assert(MI.getOpcode() == AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16_sa ||
             MI.getOpcode() == AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16_nsa ||
             MI.getOpcode() == AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16_sa ||
             MI.getOpcode() == AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16_nsa);
      addOperand(MI, MCOperand::createImm(1));
    }
    return MCDisassembler::Success;
  }

  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  bool IsAtomic = (VDstIdx != -1);
  bool IsGather4 = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::Gather4;

  bool IsNSA = false;
  unsigned AddrSize = Info->VAddrDwords;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    unsigned DimIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dim);
    int A16Idx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::a16);
    const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
        AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
    const AMDGPU::MIMGDimInfo *Dim =
        AMDGPU::getMIMGDimInfoByEncoding(MI.getOperand(DimIdx).getImm());
    const bool IsA16 = (A16Idx != -1 && MI.getOperand(A16Idx).getImm());

    AddrSize =
        AMDGPU::getAddrSizeMIMGOp(BaseOpcode, Dim, IsA16, AMDGPU::hasG16(STI));

    IsNSA = Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA;
    if (!IsNSA) {
      if (AddrSize > 8)
        AddrSize = 16;
    } else {
      if (AddrSize > Info->VAddrDwords) {
        // The NSA encoding does not contain enough operands for the
        // combination of base opcode / dimension. Should this be an error?
        return MCDisassembler::Success;
      }
    }
  }

  unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
  unsigned DstSize = IsGather4 ? 4 : std::max(countPopulation(DMask), 1u);
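  // E.g. a dmask of 0b1011 has three bits set, so the data register holds
  // 3 dwords (before D16 packing and the optional TFE dword are applied
  // below).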

  bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
  if (D16 && AMDGPU::hasPackedD16(STI)) {
    DstSize = (DstSize + 1) / 2;
  }

  if (TFEIdx != -1 && MI.getOperand(TFEIdx).getImm())
    DstSize += 1;

  if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords)
    return MCDisassembler::Success;

  int NewOpcode =
      AMDGPU::getMIMGOpcode(Info->BaseOpcode, Info->MIMGEncoding, DstSize, AddrSize);
  if (NewOpcode == -1)
    return MCDisassembler::Success;

  // Widen the register to the correct number of enabled channels.
  unsigned NewVdata = AMDGPU::NoRegister;
  if (DstSize != Info->VDataDwords) {
    auto DataRCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;

    // Get first subregister of VData
    unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
    unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
    Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;

    NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
                                       &MRI.getRegClass(DataRCID));
    if (NewVdata == AMDGPU::NoRegister) {
      // It's possible to encode this such that the low register + enabled
      // components exceeds the register count.
      return MCDisassembler::Success;
    }
  }

  unsigned NewVAddr0 = AMDGPU::NoRegister;
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX10] && !IsNSA &&
      AddrSize != Info->VAddrDwords) {
    unsigned VAddr0 = MI.getOperand(VAddr0Idx).getReg();
    unsigned VAddrSub0 = MRI.getSubReg(VAddr0, AMDGPU::sub0);
    VAddr0 = (VAddrSub0 != 0) ? VAddrSub0 : VAddr0;

    auto AddrRCID = MCII->get(NewOpcode).OpInfo[VAddr0Idx].RegClass;
    NewVAddr0 = MRI.getMatchingSuperReg(VAddr0, AMDGPU::sub0,
                                        &MRI.getRegClass(AddrRCID));
    if (NewVAddr0 == AMDGPU::NoRegister)
      return MCDisassembler::Success;
  }

  MI.setOpcode(NewOpcode);

  if (NewVdata != AMDGPU::NoRegister) {
    MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);

    if (IsAtomic) {
      // Atomic operations have an additional operand (a copy of data)
      MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
    }
  }

  if (NewVAddr0 != AMDGPU::NoRegister) {
    MI.getOperand(VAddr0Idx) = MCOperand::createReg(NewVAddr0);
  } else if (IsNSA) {
    assert(AddrSize <= Info->VAddrDwords);
    MI.erase(MI.begin() + VAddr0Idx + AddrSize,
             MI.begin() + VAddr0Idx + Info->VAddrDwords);
  }

  return MCDisassembler::Success;
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
      getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we accept as much as we can; let the assembler sort it out
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::TTMP_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::TTMP_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV232(unsigned Val) const {
  return decodeSrcOp(OPWV232, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VRegOrLds_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_AGPR_32(unsigned Val) const {
  return createRegOperand(AMDGPU::AGPR_32RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_64RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_128RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_256(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_256RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_512(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_512RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_1024(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_1024RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AV_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_AV_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_256(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_256RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_512(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_512RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_1024(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_1024RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // The table-gen'd disassembler doesn't care about operand types, leaving
  // only the register class, so an SSrc_32 operand turns into SReg_32, and
  // therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
  unsigned Val) const {
  // SReg_32_XEXEC_HI is SReg_32 without EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SRegOrLds_32(unsigned Val) const {
  // The table-gen'd disassembler doesn't care about operand types, leaving
  // only the register class, so an SSrc_32 operand turns into SReg_32, and
  // therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return decodeDstOp(OPW256, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return decodeDstOp(OPW512, Val);
}

MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are supposed to be unsigned integer
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}

MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;

  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
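  // Encodings from INLINE_INTEGER_C_MIN up to INLINE_INTEGER_C_POSITIVE_MAX
  // map to 0, 1, 2, ... and the values above that map to -1, -2, ...; e.g.
  // raw value 129 decodes to the inline constant 1, and 193 decodes to -1.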
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
    // Cast prevents negative overflow.
}

static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
  case OPW128: // splat constants
  case OPW512:
  case OPW1024:
  case OPWV232:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
  case OPW256:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64:
  case OPWV232: return VReg_64RegClassID;
  case OPW96: return VReg_96RegClassID;
  case OPW128: return VReg_128RegClassID;
  case OPW160: return VReg_160RegClassID;
  case OPW256: return VReg_256RegClassID;
  case OPW512: return VReg_512RegClassID;
  case OPW1024: return VReg_1024RegClassID;
  }
}

unsigned AMDGPUDisassembler::getAgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return AGPR_32RegClassID;
  case OPW64:
  case OPWV232: return AReg_64RegClassID;
  case OPW96: return AReg_96RegClassID;
  case OPW128: return AReg_128RegClassID;
  case OPW160: return AReg_160RegClassID;
  case OPW256: return AReg_256RegClassID;
  case OPW512: return AReg_512RegClassID;
  case OPW1024: return AReg_1024RegClassID;
  }
}


unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64:
  case OPWV232: return SGPR_64RegClassID;
  case OPW96: return SGPR_96RegClassID;
  case OPW128: return SGPR_128RegClassID;
  case OPW160: return SGPR_160RegClassID;
  case OPW256: return SGPR_256RegClassID;
  case OPW512: return SGPR_512RegClassID;
  }
}

unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64:
  case OPWV232: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  case OPW256: return TTMP_256RegClassID;
  case OPW512: return TTMP_512RegClassID;
  }
}

int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
  using namespace AMDGPU::EncValues;

  unsigned TTmpMin = isGFX9Plus() ? TTMP_GFX9PLUS_MIN : TTMP_VI_MIN;
  unsigned TTmpMax = isGFX9Plus() ? TTMP_GFX9PLUS_MAX : TTMP_VI_MAX;

  return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
}

MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 1024); // enum10

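  // Bit 9 of the enum10 source encoding selects the AGPR file; the low nine
  // bits are then decoded exactly like a regular enum9 source operand.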
  bool IsAGPR = Val & 512;
  Val &= 511;

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(IsAGPR ? getAgprClassId(Width)
                                   : getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    // "SGPR_MIN <= Val" is always true and causes compilation warning.
    static_assert(SGPR_MIN == 0, "");
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
  case OPWV232:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

MCOperand AMDGPUDisassembler::decodeDstOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 128);
  assert(Width == OPW256 || Width == OPW512);

  if (Val <= SGPR_MAX) {
    // "SGPR_MIN <= Val" is always true and causes compilation warning.
    static_assert(SGPR_MIN == 0, "");
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  llvm_unreachable("unknown dst register");
}

MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR_LO);
  case 103: return createRegOperand(FLAT_SCR_HI);
  case 104: return createRegOperand(XNACK_MASK_LO);
  case 105: return createRegOperand(XNACK_MASK_HI);
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 125: return createRegOperand(SGPR_NULL);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
  case 251: return createRegOperand(SRC_VCCZ);
  case 252: return createRegOperand(SRC_EXECZ);
  case 253: return createRegOperand(SRC_SCC);
  case 254: return createRegOperand(LDS_DIRECT);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR);
  case 104: return createRegOperand(XNACK_MASK);
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 125: return createRegOperand(SGPR_NULL);
  case 126: return createRegOperand(EXEC);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
  case 251: return createRegOperand(SRC_VCCZ);
  case 252: return createRegOperand(SRC_EXECZ);
  case 253: return createRegOperand(SRC_SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
                                            const unsigned Val) const {
  using namespace AMDGPU::SDWA;
  using namespace AMDGPU::EncValues;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
      STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    // XXX: The int cast is needed to avoid a "comparison with unsigned is
    // always true" warning.
    if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
      return createRegOperand(getVgprClassId(Width),
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    }
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= (isGFX10Plus() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
                              : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
      return createSRegOperand(getSgprClassId(Width),
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    }
    if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_TTMP_MAX) {
      return createSRegOperand(getTtmpClassId(Width),
                               Val - SDWA9EncValues::SRC_TTMP_MIN);
    }

    const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;

    if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
      return decodeIntImmed(SVal);

    if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
      return decodeFPImmed(Width, SVal);

    return decodeSpecialReg32(SVal);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    return createRegOperand(getVgprClassId(Width), Val);
  }
  llvm_unreachable("unsupported target");
}

MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
  using namespace AMDGPU::SDWA;

  assert((STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
          STI.getFeatureBits()[AMDGPU::FeatureGFX10]) &&
         "SDWAVopcDst should be present only on GFX9+");

  bool IsWave64 = STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64];

  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;

    int TTmpIdx = getTTmpIdx(Val);
    if (TTmpIdx >= 0) {
      auto TTmpClsId = getTtmpClassId(IsWave64 ? OPW64 : OPW32);
      return createSRegOperand(TTmpClsId, TTmpIdx);
    } else if (Val > SGPR_MAX) {
      return IsWave64 ? decodeSpecialReg64(Val)
                      : decodeSpecialReg32(Val);
    } else {
      return createSRegOperand(getSgprClassId(IsWave64 ? OPW64 : OPW32), Val);
    }
  } else {
    return createRegOperand(IsWave64 ? AMDGPU::VCC : AMDGPU::VCC_LO);
  }
}

MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const {
  return STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64] ?
    decodeOperand_SReg_64(Val) : decodeOperand_SReg_32(Val);
}

bool AMDGPUDisassembler::isVI() const {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool AMDGPUDisassembler::isGFX9() const { return AMDGPU::isGFX9(STI); }

bool AMDGPUDisassembler::isGFX90A() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts];
}

bool AMDGPUDisassembler::isGFX9Plus() const { return AMDGPU::isGFX9Plus(STI); }

bool AMDGPUDisassembler::isGFX10() const { return AMDGPU::isGFX10(STI); }

bool AMDGPUDisassembler::isGFX10Plus() const {
  return AMDGPU::isGFX10Plus(STI);
}

bool AMDGPUDisassembler::hasArchitectedFlatScratch() const {
  return STI.getFeatureBits()[AMDGPU::FeatureArchitectedFlatScratch];
}

//===----------------------------------------------------------------------===//
// AMDGPU specific symbol handling
//===----------------------------------------------------------------------===//
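// Print the field selected by MASK (shifted down by the matching MASK_SHIFT
// constant) from FourByteBuffer into KdStream as an assembler directive.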
#define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
  do {                                                                         \
    KdStream << Indent << DIRECTIVE " "                                        \
             << ((FourByteBuffer & MASK) >> (MASK##_SHIFT)) << '\n';           \
  } while (0)

// NOLINTNEXTLINE(readability-identifier-naming)
MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC1(
    uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
  using namespace amdhsa;
  StringRef Indent = "\t";

  // We cannot accurately backward compute #VGPRs used from
  // GRANULATED_WORKITEM_VGPR_COUNT. But we are concerned with getting the same
  // value of GRANULATED_WORKITEM_VGPR_COUNT in the reassembled binary. So we
  // simply calculate the inverse of what the assembler does.
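  // For example, on a target whose VGPR encoding granule is 4, a granulated
  // count of 3 is printed back as ".amdhsa_next_free_vgpr 16", since
  // (3 + 1) * 4 == 16.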
1481
1482 uint32_t GranulatedWorkitemVGPRCount =
1483 (FourByteBuffer & COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT) >>
1484 COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT_SHIFT;
1485
1486 uint32_t NextFreeVGPR = (GranulatedWorkitemVGPRCount + 1) *
1487 AMDGPU::IsaInfo::getVGPREncodingGranule(&STI);
1488
1489 KdStream << Indent << ".amdhsa_next_free_vgpr " << NextFreeVGPR << '\n';
1490
  // We cannot backward compute the values used to calculate
  // GRANULATED_WAVEFRONT_SGPR_COUNT. Hence the original values for the
  // following directives can't be recovered:
  //   .amdhsa_reserve_vcc
  //   .amdhsa_reserve_flat_scratch
  //   .amdhsa_reserve_xnack_mask
  // They take their respective default values if not specified in the
  // assembly.
  //
  //   GRANULATED_WAVEFRONT_SGPR_COUNT
  //       = f(NEXT_FREE_SGPR + VCC + FLAT_SCRATCH + XNACK_MASK)
  //
  // We compute the inverse as though all directives apart from NEXT_FREE_SGPR
  // are set to 0. So while disassembling we consider that:
  //
  //   GRANULATED_WAVEFRONT_SGPR_COUNT
  //       = f(NEXT_FREE_SGPR + 0 + 0 + 0)
  //
  // The disassembler cannot recover the original values of those 3 directives.
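  //
  // For example, with a hypothetical SGPR encoding granule of 8, an encoded
  // GRANULATED_WAVEFRONT_SGPR_COUNT of 11 is printed back as
  // ".amdhsa_next_free_sgpr 96" ((11 + 1) * 8), with the reserve directives
  // left at 0 as noted above.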

  uint32_t GranulatedWavefrontSGPRCount =
      (FourByteBuffer & COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT) >>
      COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT_SHIFT;

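  // On GFX10+ the wavefront SGPR count is no longer granulated per wave, so
  // this field is expected to be zero; a non-zero value cannot be
  // round-tripped.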
  if (isGFX10Plus() && GranulatedWavefrontSGPRCount)
    return MCDisassembler::Fail;

  uint32_t NextFreeSGPR = (GranulatedWavefrontSGPRCount + 1) *
                          AMDGPU::IsaInfo::getSGPREncodingGranule(&STI);

  KdStream << Indent << ".amdhsa_reserve_vcc " << 0 << '\n';
  if (!hasArchitectedFlatScratch())
    KdStream << Indent << ".amdhsa_reserve_flat_scratch " << 0 << '\n';
  KdStream << Indent << ".amdhsa_reserve_xnack_mask " << 0 << '\n';
  KdStream << Indent << ".amdhsa_next_free_sgpr " << NextFreeSGPR << "\n";

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_PRIORITY)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(".amdhsa_float_round_mode_32",
                  COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
  PRINT_DIRECTIVE(".amdhsa_float_round_mode_16_64",
                  COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
  PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_32",
                  COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
  PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_16_64",
                  COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_PRIV)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(".amdhsa_dx10_clamp", COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP);

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_DEBUG_MODE)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(".amdhsa_ieee_mode", COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE);

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_BULKY)
    return MCDisassembler::Fail;

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_CDBG_USER)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(".amdhsa_fp16_overflow", COMPUTE_PGM_RSRC1_FP16_OVFL);

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_RESERVED0)
    return MCDisassembler::Fail;

  if (isGFX10Plus()) {
    PRINT_DIRECTIVE(".amdhsa_workgroup_processor_mode",
                    COMPUTE_PGM_RSRC1_WGP_MODE);
    PRINT_DIRECTIVE(".amdhsa_memory_ordered", COMPUTE_PGM_RSRC1_MEM_ORDERED);
    PRINT_DIRECTIVE(".amdhsa_forward_progress", COMPUTE_PGM_RSRC1_FWD_PROGRESS);
  }
  return MCDisassembler::Success;
}

// NOLINTNEXTLINE(readability-identifier-naming)
MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC2(
    uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
  using namespace amdhsa;
  StringRef Indent = "\t";
  if (hasArchitectedFlatScratch())
    PRINT_DIRECTIVE(".amdhsa_enable_private_segment",
                    COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
  else
    PRINT_DIRECTIVE(".amdhsa_system_sgpr_private_segment_wavefront_offset",
                    COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
  PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_x",
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
  PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_y",
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
  PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_z",
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
  PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_info",
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
  PRINT_DIRECTIVE(".amdhsa_system_vgpr_workitem_id",
                  COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);

  if (FourByteBuffer & COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_ADDRESS_WATCH)
    return MCDisassembler::Fail;

  if (FourByteBuffer & COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_MEMORY)
    return MCDisassembler::Fail;

  if (FourByteBuffer & COMPUTE_PGM_RSRC2_GRANULATED_LDS_SIZE)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(
      ".amdhsa_exception_fp_ieee_invalid_op",
      COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
  PRINT_DIRECTIVE(".amdhsa_exception_fp_denorm_src",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
  PRINT_DIRECTIVE(
      ".amdhsa_exception_fp_ieee_div_zero",
      COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
  PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_overflow",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
  PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_underflow",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
  PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_inexact",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
  PRINT_DIRECTIVE(".amdhsa_exception_int_div_zero",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);

  if (FourByteBuffer & COMPUTE_PGM_RSRC2_RESERVED0)
    return MCDisassembler::Fail;

  return MCDisassembler::Success;
}

#undef PRINT_DIRECTIVE

MCDisassembler::DecodeStatus
AMDGPUDisassembler::decodeKernelDescriptorDirective(
    DataExtractor::Cursor &Cursor, ArrayRef<uint8_t> Bytes,
    raw_string_ostream &KdStream) const {
#define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
  do {                                                                         \
    KdStream << Indent << DIRECTIVE " "                                        \
             << ((TwoByteBuffer & MASK) >> (MASK##_SHIFT)) << '\n';            \
  } while (0)

  uint16_t TwoByteBuffer = 0;
  uint32_t FourByteBuffer = 0;

  StringRef ReservedBytes;
  StringRef Indent = "\t";

  assert(Bytes.size() == 64);
  DataExtractor DE(Bytes, /*IsLittleEndian=*/true, /*AddressSize=*/8);

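  // Each call decodes one field of the 64-byte kernel descriptor: the byte
  // offset of the cursor selects which field is being decoded, and the cursor
  // is advanced past that field before returning.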
  switch (Cursor.tell()) {
  case amdhsa::GROUP_SEGMENT_FIXED_SIZE_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    KdStream << Indent << ".amdhsa_group_segment_fixed_size " << FourByteBuffer
             << '\n';
    return MCDisassembler::Success;

  case amdhsa::PRIVATE_SEGMENT_FIXED_SIZE_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    KdStream << Indent << ".amdhsa_private_segment_fixed_size "
             << FourByteBuffer << '\n';
    return MCDisassembler::Success;

  case amdhsa::KERNARG_SIZE_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    KdStream << Indent << ".amdhsa_kernarg_size "
             << FourByteBuffer << '\n';
    return MCDisassembler::Success;

  case amdhsa::RESERVED0_OFFSET:
    // 4 reserved bytes, must be 0.
    ReservedBytes = DE.getBytes(Cursor, 4);
    for (int I = 0; I < 4; ++I) {
      if (ReservedBytes[I] != 0) {
        return MCDisassembler::Fail;
      }
    }
    return MCDisassembler::Success;

  case amdhsa::KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET:
    // KERNEL_CODE_ENTRY_BYTE_OFFSET
    // So far no directive controls this for Code Object V3, so simply skip for
    // disassembly.
    DE.skip(Cursor, 8);
    return MCDisassembler::Success;

  case amdhsa::RESERVED1_OFFSET:
    // 20 reserved bytes, must be 0.
    ReservedBytes = DE.getBytes(Cursor, 20);
    for (int I = 0; I < 20; ++I) {
      if (ReservedBytes[I] != 0) {
        return MCDisassembler::Fail;
      }
    }
    return MCDisassembler::Success;

  case amdhsa::COMPUTE_PGM_RSRC3_OFFSET:
    // COMPUTE_PGM_RSRC3
    // - Only set for GFX10; GFX6-9 must have it set to 0.
    // - Currently no directives directly control this.
    FourByteBuffer = DE.getU32(Cursor);
    if (!isGFX10Plus() && FourByteBuffer) {
      return MCDisassembler::Fail;
    }
    return MCDisassembler::Success;

  case amdhsa::COMPUTE_PGM_RSRC1_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    if (decodeCOMPUTE_PGM_RSRC1(FourByteBuffer, KdStream) ==
        MCDisassembler::Fail) {
      return MCDisassembler::Fail;
    }
    return MCDisassembler::Success;

  case amdhsa::COMPUTE_PGM_RSRC2_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    if (decodeCOMPUTE_PGM_RSRC2(FourByteBuffer, KdStream) ==
        MCDisassembler::Fail) {
      return MCDisassembler::Fail;
    }
    return MCDisassembler::Success;

  case amdhsa::KERNEL_CODE_PROPERTIES_OFFSET:
    using namespace amdhsa;
    TwoByteBuffer = DE.getU16(Cursor);

    if (!hasArchitectedFlatScratch())
      PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_buffer",
                      KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_ptr",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_queue_ptr",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_segment_ptr",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_id",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
    if (!hasArchitectedFlatScratch())
      PRINT_DIRECTIVE(".amdhsa_user_sgpr_flat_scratch_init",
                      KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_size",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);

    if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED0)
      return MCDisassembler::Fail;

    // Reserved for GFX9
    if (isGFX9() &&
        (TwoByteBuffer & KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32)) {
      return MCDisassembler::Fail;
    } else if (isGFX10Plus()) {
      PRINT_DIRECTIVE(".amdhsa_wavefront_size32",
                      KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
    }

    if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED1)
      return MCDisassembler::Fail;

    return MCDisassembler::Success;

  case amdhsa::RESERVED2_OFFSET:
    // 6 bytes from here are reserved, must be 0.
    ReservedBytes = DE.getBytes(Cursor, 6);
    for (int I = 0; I < 6; ++I) {
      if (ReservedBytes[I] != 0)
        return MCDisassembler::Fail;
    }
    return MCDisassembler::Success;

  default:
    llvm_unreachable("Unhandled index. Case statements cover everything.");
    return MCDisassembler::Fail;
  }
#undef PRINT_DIRECTIVE
}

MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeKernelDescriptor(
    StringRef KdName, ArrayRef<uint8_t> Bytes, uint64_t KdAddress) const {
  // CP microcode requires the kernel descriptor to be 64-byte aligned.
  if (Bytes.size() != 64 || KdAddress % 64 != 0)
    return MCDisassembler::Fail;

  std::string Kd;
  raw_string_ostream KdStream(Kd);
  KdStream << ".amdhsa_kernel " << KdName << '\n';
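  // The emitted text mirrors the assembler syntax; a minimal sketch of the
  // result (directive values depend on the descriptor contents) looks like:
  //   .amdhsa_kernel my_kernel
  //       .amdhsa_group_segment_fixed_size 0
  //       ...
  //   .end_amdhsa_kernel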

  DataExtractor::Cursor C(0);
  while (C && C.tell() < Bytes.size()) {
    MCDisassembler::DecodeStatus Status =
        decodeKernelDescriptorDirective(C, Bytes, KdStream);

    cantFail(C.takeError());

    if (Status == MCDisassembler::Fail)
      return MCDisassembler::Fail;
  }
  KdStream << ".end_amdhsa_kernel\n";
  outs() << KdStream.str();
  return MCDisassembler::Success;
}

Optional<MCDisassembler::DecodeStatus>
AMDGPUDisassembler::onSymbolStart(SymbolInfoTy &Symbol, uint64_t &Size,
                                  ArrayRef<uint8_t> Bytes, uint64_t Address,
                                  raw_ostream &CStream) const {
  // Right now only the kernel descriptor needs to be handled; all other
  // symbols are ignored for target-specific handling.
  // TODO:
  // Fix the spurious symbol issue for AMDGPU kernels. Exists for both Code
  // Object V2 and V3 when symbols are marked protected.

  // amd_kernel_code_t for Code Object V2.
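  // The amd_kernel_code_t object is 256 bytes; its contents are not decoded
  // here, only its size is reported.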
  if (Symbol.Type == ELF::STT_AMDGPU_HSA_KERNEL) {
    Size = 256;
    return MCDisassembler::Fail;
  }

  // Code Object V3 kernel descriptors.
  StringRef Name = Symbol.Name;
  if (Symbol.Type == ELF::STT_OBJECT && Name.endswith(StringRef(".kd"))) {
    Size = 64; // Size = 64 regardless of success or failure.
    return decodeKernelDescriptor(Name.drop_back(3), Bytes, Address);
  }
  return None;
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find a symbol name for the specified label.
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                                raw_ostream &/*cStream*/,
                                                int64_t Value,
                                                uint64_t /*Address*/,
                                                bool IsBranch,
                                                uint64_t /*Offset*/,
                                                uint64_t /*InstSize*/) {

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  if (!Symbols)
    return false;

  auto Result = llvm::find_if(*Symbols, [Value](const SymbolInfoTy &Val) {
    return Val.Addr == static_cast<uint64_t>(Value) &&
           Val.Type == ELF::STT_NOTYPE;
  });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(Result->Name);
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  // Add to list of referenced addresses, so caller can synthesize a label.
  ReferencedAddresses.push_back(static_cast<uint64_t>(Value));
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                                            LLVMOpInfoCallback /*GetOpInfo*/,
                                            LLVMSymbolLookupCallback /*SymbolLookUp*/,
                                            void *DisInfo,
                                            MCContext *Ctx,
                                            std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
}

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}
