1c8fbf6ffSEugene Zelenko //===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===// 2e1818af8STom Stellard // 32946cd70SChandler Carruth // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 42946cd70SChandler Carruth // See https://llvm.org/LICENSE.txt for license information. 52946cd70SChandler Carruth // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6e1818af8STom Stellard // 7e1818af8STom Stellard //===----------------------------------------------------------------------===// 8e1818af8STom Stellard // 9e1818af8STom Stellard //===----------------------------------------------------------------------===// 10e1818af8STom Stellard // 11e1818af8STom Stellard /// \file 12e1818af8STom Stellard /// 13e1818af8STom Stellard /// This file contains definition for AMDGPU ISA disassembler 14e1818af8STom Stellard // 15e1818af8STom Stellard //===----------------------------------------------------------------------===// 16e1818af8STom Stellard 17e1818af8STom Stellard // ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)? 18e1818af8STom Stellard 19c8fbf6ffSEugene Zelenko #include "Disassembler/AMDGPUDisassembler.h" 20e1818af8STom Stellard #include "AMDGPU.h" 21e1818af8STom Stellard #include "AMDGPURegisterInfo.h" 22c5a154dbSTom Stellard #include "MCTargetDesc/AMDGPUMCTargetDesc.h" 23212a251cSArtem Tamazov #include "SIDefines.h" 248ce2ee9dSRichard Trieu #include "TargetInfo/AMDGPUTargetInfo.h" 25e1818af8STom Stellard #include "Utils/AMDGPUBaseInfo.h" 26c8fbf6ffSEugene Zelenko #include "llvm-c/Disassembler.h" 27c8fbf6ffSEugene Zelenko #include "llvm/ADT/APInt.h" 28c8fbf6ffSEugene Zelenko #include "llvm/ADT/ArrayRef.h" 29c8fbf6ffSEugene Zelenko #include "llvm/ADT/Twine.h" 30264b5d9eSZachary Turner #include "llvm/BinaryFormat/ELF.h" 31ca64ef20SMatt Arsenault #include "llvm/MC/MCAsmInfo.h" 32ac106addSNikolay Haustov #include "llvm/MC/MCContext.h" 33c8fbf6ffSEugene Zelenko #include "llvm/MC/MCDisassembler/MCDisassembler.h" 34c8fbf6ffSEugene Zelenko #include "llvm/MC/MCExpr.h" 35e1818af8STom Stellard #include "llvm/MC/MCFixedLenDisassembler.h" 36e1818af8STom Stellard #include "llvm/MC/MCInst.h" 37e1818af8STom Stellard #include "llvm/MC/MCSubtargetInfo.h" 38ac106addSNikolay Haustov #include "llvm/Support/Endian.h" 39c8fbf6ffSEugene Zelenko #include "llvm/Support/ErrorHandling.h" 40c8fbf6ffSEugene Zelenko #include "llvm/Support/MathExtras.h" 41e1818af8STom Stellard #include "llvm/Support/TargetRegistry.h" 42c8fbf6ffSEugene Zelenko #include "llvm/Support/raw_ostream.h" 43c8fbf6ffSEugene Zelenko #include <algorithm> 44c8fbf6ffSEugene Zelenko #include <cassert> 45c8fbf6ffSEugene Zelenko #include <cstddef> 46c8fbf6ffSEugene Zelenko #include <cstdint> 47c8fbf6ffSEugene Zelenko #include <iterator> 48c8fbf6ffSEugene Zelenko #include <tuple> 49c8fbf6ffSEugene Zelenko #include <vector> 50e1818af8STom Stellard 51e1818af8STom Stellard using namespace llvm; 52e1818af8STom Stellard 53e1818af8STom Stellard #define DEBUG_TYPE "amdgpu-disassembler" 54e1818af8STom Stellard 5533d806a5SStanislav Mekhanoshin #define SGPR_MAX (isGFX10() ? 
AMDGPU::EncValues::SGPR_MAX_GFX10 \ 5633d806a5SStanislav Mekhanoshin : AMDGPU::EncValues::SGPR_MAX_SI) 5733d806a5SStanislav Mekhanoshin 58c8fbf6ffSEugene Zelenko using DecodeStatus = llvm::MCDisassembler::DecodeStatus; 59e1818af8STom Stellard 60ca64ef20SMatt Arsenault AMDGPUDisassembler::AMDGPUDisassembler(const MCSubtargetInfo &STI, 61ca64ef20SMatt Arsenault MCContext &Ctx, 62ca64ef20SMatt Arsenault MCInstrInfo const *MCII) : 63ca64ef20SMatt Arsenault MCDisassembler(STI, Ctx), MCII(MCII), MRI(*Ctx.getRegisterInfo()), 64418e23e3SMatt Arsenault TargetMaxInstBytes(Ctx.getAsmInfo()->getMaxInstLength(&STI)) { 65418e23e3SMatt Arsenault 66418e23e3SMatt Arsenault // ToDo: AMDGPUDisassembler supports only VI ISA. 67418e23e3SMatt Arsenault if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding] && !isGFX10()) 68418e23e3SMatt Arsenault report_fatal_error("Disassembly not yet supported for subtarget"); 69418e23e3SMatt Arsenault } 70ca64ef20SMatt Arsenault 71ac106addSNikolay Haustov inline static MCDisassembler::DecodeStatus 72ac106addSNikolay Haustov addOperand(MCInst &Inst, const MCOperand& Opnd) { 73ac106addSNikolay Haustov Inst.addOperand(Opnd); 74ac106addSNikolay Haustov return Opnd.isValid() ? 75ac106addSNikolay Haustov MCDisassembler::Success : 76ac106addSNikolay Haustov MCDisassembler::SoftFail; 77e1818af8STom Stellard } 78e1818af8STom Stellard 79549c89d2SSam Kolton static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op, 80549c89d2SSam Kolton uint16_t NameIdx) { 81549c89d2SSam Kolton int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx); 82549c89d2SSam Kolton if (OpIdx != -1) { 83549c89d2SSam Kolton auto I = MI.begin(); 84549c89d2SSam Kolton std::advance(I, OpIdx); 85549c89d2SSam Kolton MI.insert(I, Op); 86549c89d2SSam Kolton } 87549c89d2SSam Kolton return OpIdx; 88549c89d2SSam Kolton } 89549c89d2SSam Kolton 903381d7a2SSam Kolton static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm, 913381d7a2SSam Kolton uint64_t Addr, const void *Decoder) { 923381d7a2SSam Kolton auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); 933381d7a2SSam Kolton 94efec1396SScott Linder // Our branches take a simm16, but we need two extra bits to account for the 95efec1396SScott Linder // factor of 4. 
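  // Worked example of the computation below: an encoded Imm of 0xFFFF
  // (simm16 == -1) is first scaled to 0x3FFFC, which the 18-bit signed APInt
  // interprets as -4; the target is then resolved relative to the end of the
  // 4-byte SOPP word, so at address A it becomes A + 4 + (-4) == A.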
963381d7a2SSam Kolton APInt SignedOffset(18, Imm * 4, true); 973381d7a2SSam Kolton int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue(); 983381d7a2SSam Kolton 993381d7a2SSam Kolton if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2)) 1003381d7a2SSam Kolton return MCDisassembler::Success; 1013381d7a2SSam Kolton return addOperand(Inst, MCOperand::createImm(Imm)); 1023381d7a2SSam Kolton } 1033381d7a2SSam Kolton 104363f47a2SSam Kolton #define DECODE_OPERAND(StaticDecoderName, DecoderName) \ 105363f47a2SSam Kolton static DecodeStatus StaticDecoderName(MCInst &Inst, \ 106ac106addSNikolay Haustov unsigned Imm, \ 107ac106addSNikolay Haustov uint64_t /*Addr*/, \ 108ac106addSNikolay Haustov const void *Decoder) { \ 109ac106addSNikolay Haustov auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \ 110363f47a2SSam Kolton return addOperand(Inst, DAsm->DecoderName(Imm)); \ 111e1818af8STom Stellard } 112e1818af8STom Stellard 113363f47a2SSam Kolton #define DECODE_OPERAND_REG(RegClass) \ 114363f47a2SSam Kolton DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass) 115e1818af8STom Stellard 116363f47a2SSam Kolton DECODE_OPERAND_REG(VGPR_32) 1176023d599SDmitry Preobrazhensky DECODE_OPERAND_REG(VRegOrLds_32) 118363f47a2SSam Kolton DECODE_OPERAND_REG(VS_32) 119363f47a2SSam Kolton DECODE_OPERAND_REG(VS_64) 12030fc5239SDmitry Preobrazhensky DECODE_OPERAND_REG(VS_128) 121e1818af8STom Stellard 122363f47a2SSam Kolton DECODE_OPERAND_REG(VReg_64) 123363f47a2SSam Kolton DECODE_OPERAND_REG(VReg_96) 124363f47a2SSam Kolton DECODE_OPERAND_REG(VReg_128) 125e1818af8STom Stellard 126363f47a2SSam Kolton DECODE_OPERAND_REG(SReg_32) 127363f47a2SSam Kolton DECODE_OPERAND_REG(SReg_32_XM0_XEXEC) 128ca7b0a17SMatt Arsenault DECODE_OPERAND_REG(SReg_32_XEXEC_HI) 1296023d599SDmitry Preobrazhensky DECODE_OPERAND_REG(SRegOrLds_32) 130363f47a2SSam Kolton DECODE_OPERAND_REG(SReg_64) 131363f47a2SSam Kolton DECODE_OPERAND_REG(SReg_64_XEXEC) 132363f47a2SSam Kolton DECODE_OPERAND_REG(SReg_128) 133363f47a2SSam Kolton DECODE_OPERAND_REG(SReg_256) 134363f47a2SSam Kolton DECODE_OPERAND_REG(SReg_512) 135e1818af8STom Stellard 1364bd72361SMatt Arsenault static DecodeStatus decodeOperand_VSrc16(MCInst &Inst, 1374bd72361SMatt Arsenault unsigned Imm, 1384bd72361SMatt Arsenault uint64_t Addr, 1394bd72361SMatt Arsenault const void *Decoder) { 1404bd72361SMatt Arsenault auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); 1414bd72361SMatt Arsenault return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm)); 1424bd72361SMatt Arsenault } 1434bd72361SMatt Arsenault 1449be7b0d4SMatt Arsenault static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst, 1459be7b0d4SMatt Arsenault unsigned Imm, 1469be7b0d4SMatt Arsenault uint64_t Addr, 1479be7b0d4SMatt Arsenault const void *Decoder) { 1489be7b0d4SMatt Arsenault auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); 1499be7b0d4SMatt Arsenault return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm)); 1509be7b0d4SMatt Arsenault } 1519be7b0d4SMatt Arsenault 152549c89d2SSam Kolton #define DECODE_SDWA(DecName) \ 153549c89d2SSam Kolton DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName) 154363f47a2SSam Kolton 155549c89d2SSam Kolton DECODE_SDWA(Src32) 156549c89d2SSam Kolton DECODE_SDWA(Src16) 157549c89d2SSam Kolton DECODE_SDWA(VopcDst) 158363f47a2SSam Kolton 159e1818af8STom Stellard #include "AMDGPUGenDisassemblerTables.inc" 160e1818af8STom Stellard 161e1818af8STom Stellard 
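// A note on the glue above: DECODE_OPERAND_REG(VGPR_32), for example, expands
// into a static DecodeVGPR_32RegisterClass() callback that simply forwards the
// raw encoding value to AMDGPUDisassembler::decodeOperand_VGPR_32(). These
// callbacks are what the TableGen-generated tables pulled in from
// AMDGPUGenDisassemblerTables.inc invoke once decodeInstruction() has matched
// an encoding.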
//===----------------------------------------------------------------------===// 162e1818af8STom Stellard // 163e1818af8STom Stellard //===----------------------------------------------------------------------===// 164e1818af8STom Stellard 1651048fb18SSam Kolton template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) { 1661048fb18SSam Kolton assert(Bytes.size() >= sizeof(T)); 1671048fb18SSam Kolton const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data()); 1681048fb18SSam Kolton Bytes = Bytes.slice(sizeof(T)); 169ac106addSNikolay Haustov return Res; 170ac106addSNikolay Haustov } 171ac106addSNikolay Haustov 172ac106addSNikolay Haustov DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table, 173ac106addSNikolay Haustov MCInst &MI, 174ac106addSNikolay Haustov uint64_t Inst, 175ac106addSNikolay Haustov uint64_t Address) const { 176ac106addSNikolay Haustov assert(MI.getOpcode() == 0); 177ac106addSNikolay Haustov assert(MI.getNumOperands() == 0); 178ac106addSNikolay Haustov MCInst TmpInst; 179ce941c9cSDmitry Preobrazhensky HasLiteral = false; 180ac106addSNikolay Haustov const auto SavedBytes = Bytes; 181ac106addSNikolay Haustov if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) { 182ac106addSNikolay Haustov MI = TmpInst; 183ac106addSNikolay Haustov return MCDisassembler::Success; 184ac106addSNikolay Haustov } 185ac106addSNikolay Haustov Bytes = SavedBytes; 186ac106addSNikolay Haustov return MCDisassembler::Fail; 187ac106addSNikolay Haustov } 188ac106addSNikolay Haustov 189e1818af8STom Stellard DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size, 190ac106addSNikolay Haustov ArrayRef<uint8_t> Bytes_, 191e1818af8STom Stellard uint64_t Address, 192e1818af8STom Stellard raw_ostream &WS, 193e1818af8STom Stellard raw_ostream &CS) const { 194e1818af8STom Stellard CommentStream = &CS; 195549c89d2SSam Kolton bool IsSDWA = false; 196e1818af8STom Stellard 197ca64ef20SMatt Arsenault unsigned MaxInstBytesNum = std::min((size_t)TargetMaxInstBytes, Bytes_.size()); 198ac106addSNikolay Haustov Bytes = Bytes_.slice(0, MaxInstBytesNum); 199161a158eSNikolay Haustov 200ac106addSNikolay Haustov DecodeStatus Res = MCDisassembler::Fail; 201ac106addSNikolay Haustov do { 202824e804bSValery Pykhtin // ToDo: better to switch encoding length using some bit predicate 203ac106addSNikolay Haustov // but it is unknown yet, so try all we can 2041048fb18SSam Kolton 205c9bdcb75SSam Kolton // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2 206c9bdcb75SSam Kolton // encodings 2071048fb18SSam Kolton if (Bytes.size() >= 8) { 2081048fb18SSam Kolton const uint64_t QW = eatBytes<uint64_t>(Bytes); 2091048fb18SSam Kolton Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address); 2101048fb18SSam Kolton if (Res) break; 211c9bdcb75SSam Kolton 212c9bdcb75SSam Kolton Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address); 213549c89d2SSam Kolton if (Res) { IsSDWA = true; break; } 214363f47a2SSam Kolton 215363f47a2SSam Kolton Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address); 216549c89d2SSam Kolton if (Res) { IsSDWA = true; break; } 2170905870fSChangpeng Fang 2188f3da70eSStanislav Mekhanoshin Res = tryDecodeInst(DecoderTableSDWA1064, MI, QW, Address); 2198f3da70eSStanislav Mekhanoshin if (Res) { IsSDWA = true; break; } 2208f3da70eSStanislav Mekhanoshin 2218f3da70eSStanislav Mekhanoshin // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and 2228f3da70eSStanislav Mekhanoshin // v_mad_mixhi_f16 for FMA 
variants. Try to decode using this special 2238f3da70eSStanislav Mekhanoshin // table first so we print the correct name. 2248f3da70eSStanislav Mekhanoshin 2258f3da70eSStanislav Mekhanoshin if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) { 2268f3da70eSStanislav Mekhanoshin Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address); 2278f3da70eSStanislav Mekhanoshin if (Res) break; 2288f3da70eSStanislav Mekhanoshin } 2298f3da70eSStanislav Mekhanoshin 2300905870fSChangpeng Fang if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) { 2310905870fSChangpeng Fang Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address); 2320084adc5SMatt Arsenault if (Res) 2330084adc5SMatt Arsenault break; 2340084adc5SMatt Arsenault } 2350084adc5SMatt Arsenault 2360084adc5SMatt Arsenault // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and 2370084adc5SMatt Arsenault // v_mad_mixhi_f16 for FMA variants. Try to decode using this special 2380084adc5SMatt Arsenault // table first so we print the correct name. 2390084adc5SMatt Arsenault if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) { 2400084adc5SMatt Arsenault Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address); 2410084adc5SMatt Arsenault if (Res) 2420084adc5SMatt Arsenault break; 2430905870fSChangpeng Fang } 2441048fb18SSam Kolton } 2451048fb18SSam Kolton 2461048fb18SSam Kolton // Reinitialize Bytes as DPP64 could have eaten too much 2471048fb18SSam Kolton Bytes = Bytes_.slice(0, MaxInstBytesNum); 2481048fb18SSam Kolton 2491048fb18SSam Kolton // Try decode 32-bit instruction 250ac106addSNikolay Haustov if (Bytes.size() < 4) break; 2511048fb18SSam Kolton const uint32_t DW = eatBytes<uint32_t>(Bytes); 2525182302aSStanislav Mekhanoshin Res = tryDecodeInst(DecoderTableGFX832, MI, DW, Address); 253ac106addSNikolay Haustov if (Res) break; 254e1818af8STom Stellard 255ac106addSNikolay Haustov Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address); 256ac106addSNikolay Haustov if (Res) break; 257ac106addSNikolay Haustov 258a0342dc9SDmitry Preobrazhensky Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address); 259a0342dc9SDmitry Preobrazhensky if (Res) break; 260a0342dc9SDmitry Preobrazhensky 2618f3da70eSStanislav Mekhanoshin Res = tryDecodeInst(DecoderTableGFX1032, MI, DW, Address); 2628f3da70eSStanislav Mekhanoshin if (Res) break; 2638f3da70eSStanislav Mekhanoshin 264ac106addSNikolay Haustov if (Bytes.size() < 4) break; 2651048fb18SSam Kolton const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW; 2665182302aSStanislav Mekhanoshin Res = tryDecodeInst(DecoderTableGFX864, MI, QW, Address); 267ac106addSNikolay Haustov if (Res) break; 268ac106addSNikolay Haustov 269ac106addSNikolay Haustov Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address); 2701e32550dSDmitry Preobrazhensky if (Res) break; 2711e32550dSDmitry Preobrazhensky 2721e32550dSDmitry Preobrazhensky Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address); 2738f3da70eSStanislav Mekhanoshin if (Res) break; 2748f3da70eSStanislav Mekhanoshin 2758f3da70eSStanislav Mekhanoshin Res = tryDecodeInst(DecoderTableGFX1064, MI, QW, Address); 276ac106addSNikolay Haustov } while (false); 277ac106addSNikolay Haustov 2788f3da70eSStanislav Mekhanoshin if (Res && (MaxInstBytesNum - Bytes.size()) == 12 && (!HasLiteral || 2798f3da70eSStanislav Mekhanoshin !(MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3))) { 2808f3da70eSStanislav Mekhanoshin MaxInstBytesNum = 8; 2818f3da70eSStanislav Mekhanoshin Bytes = Bytes_.slice(0, MaxInstBytesNum); 
2828f3da70eSStanislav Mekhanoshin eatBytes<uint64_t>(Bytes); 2838f3da70eSStanislav Mekhanoshin } 2848f3da70eSStanislav Mekhanoshin 285678e111eSMatt Arsenault if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi || 2868f3da70eSStanislav Mekhanoshin MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 || 2878f3da70eSStanislav Mekhanoshin MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx10 || 288603a43fcSKonstantin Zhuravlyov MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi || 2898f3da70eSStanislav Mekhanoshin MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_vi || 2908f3da70eSStanislav Mekhanoshin MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_gfx10 || 2918f3da70eSStanislav Mekhanoshin MI.getOpcode() == AMDGPU::V_FMAC_F16_e64_gfx10)) { 292678e111eSMatt Arsenault // Insert dummy unused src2_modifiers. 293549c89d2SSam Kolton insertNamedMCOperand(MI, MCOperand::createImm(0), 294678e111eSMatt Arsenault AMDGPU::OpName::src2_modifiers); 295678e111eSMatt Arsenault } 296678e111eSMatt Arsenault 297cad7fa85SMatt Arsenault if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) { 298692560dcSStanislav Mekhanoshin int VAddr0Idx = 299692560dcSStanislav Mekhanoshin AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0); 300692560dcSStanislav Mekhanoshin int RsrcIdx = 301692560dcSStanislav Mekhanoshin AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc); 302692560dcSStanislav Mekhanoshin unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1; 303692560dcSStanislav Mekhanoshin if (VAddr0Idx >= 0 && NSAArgs > 0) { 304692560dcSStanislav Mekhanoshin unsigned NSAWords = (NSAArgs + 3) / 4; 305692560dcSStanislav Mekhanoshin if (Bytes.size() < 4 * NSAWords) { 306692560dcSStanislav Mekhanoshin Res = MCDisassembler::Fail; 307692560dcSStanislav Mekhanoshin } else { 308692560dcSStanislav Mekhanoshin for (unsigned i = 0; i < NSAArgs; ++i) { 309692560dcSStanislav Mekhanoshin MI.insert(MI.begin() + VAddr0Idx + 1 + i, 310692560dcSStanislav Mekhanoshin decodeOperand_VGPR_32(Bytes[i])); 311692560dcSStanislav Mekhanoshin } 312692560dcSStanislav Mekhanoshin Bytes = Bytes.slice(4 * NSAWords); 313692560dcSStanislav Mekhanoshin } 314692560dcSStanislav Mekhanoshin } 315692560dcSStanislav Mekhanoshin 316692560dcSStanislav Mekhanoshin if (Res) 317cad7fa85SMatt Arsenault Res = convertMIMGInst(MI); 318cad7fa85SMatt Arsenault } 319cad7fa85SMatt Arsenault 320549c89d2SSam Kolton if (Res && IsSDWA) 321549c89d2SSam Kolton Res = convertSDWAInst(MI); 322549c89d2SSam Kolton 3238f3da70eSStanislav Mekhanoshin int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), 3248f3da70eSStanislav Mekhanoshin AMDGPU::OpName::vdst_in); 3258f3da70eSStanislav Mekhanoshin if (VDstIn_Idx != -1) { 3268f3da70eSStanislav Mekhanoshin int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx, 3278f3da70eSStanislav Mekhanoshin MCOI::OperandConstraint::TIED_TO); 3288f3da70eSStanislav Mekhanoshin if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx || 3298f3da70eSStanislav Mekhanoshin !MI.getOperand(VDstIn_Idx).isReg() || 3308f3da70eSStanislav Mekhanoshin MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) { 3318f3da70eSStanislav Mekhanoshin if (MI.getNumOperands() > (unsigned)VDstIn_Idx) 3328f3da70eSStanislav Mekhanoshin MI.erase(&MI.getOperand(VDstIn_Idx)); 3338f3da70eSStanislav Mekhanoshin insertNamedMCOperand(MI, 3348f3da70eSStanislav Mekhanoshin MCOperand::createReg(MI.getOperand(Tied).getReg()), 3358f3da70eSStanislav Mekhanoshin AMDGPU::OpName::vdst_in); 3368f3da70eSStanislav Mekhanoshin } 3378f3da70eSStanislav Mekhanoshin 
}
3388f3da70eSStanislav Mekhanoshin 
3397116e896STim Corringham   // If the opcode was not recognized, we'll assume a Size of 4 bytes
3407116e896STim Corringham   // (unless there are fewer bytes left).
3417116e896STim Corringham   Size = Res ? (MaxInstBytesNum - Bytes.size())
3427116e896STim Corringham              : std::min((size_t)4, Bytes_.size());
343ac106addSNikolay Haustov   return Res;
344161a158eSNikolay Haustov }
345e1818af8STom Stellard 
346549c89d2SSam Kolton DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
3478f3da70eSStanislav Mekhanoshin   if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
3488f3da70eSStanislav Mekhanoshin       STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
349549c89d2SSam Kolton     if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
350549c89d2SSam Kolton       // VOPC - insert clamp
351549c89d2SSam Kolton       insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
352549c89d2SSam Kolton   } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
353549c89d2SSam Kolton     int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
354549c89d2SSam Kolton     if (SDst != -1) {
355549c89d2SSam Kolton       // VOPC - insert VCC register as sdst
356ac2b0264SDmitry Preobrazhensky       insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
357549c89d2SSam Kolton                            AMDGPU::OpName::sdst);
358549c89d2SSam Kolton     } else {
359549c89d2SSam Kolton       // VOP1/2 - insert omod if present in instruction
360549c89d2SSam Kolton       insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
361549c89d2SSam Kolton     }
362549c89d2SSam Kolton   }
363549c89d2SSam Kolton   return MCDisassembler::Success;
364549c89d2SSam Kolton }
365549c89d2SSam Kolton 
366692560dcSStanislav Mekhanoshin // Note that before gfx10, the MIMG encoding provided no information about
367692560dcSStanislav Mekhanoshin // VADDR size. Consequently, decoded instructions always show address as if it
368692560dcSStanislav Mekhanoshin // has 1 dword, which may not actually be the case.
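// convertMIMGInst() below recomputes the data size from the dmask popcount
// (and, on gfx10, the address size from the dim operand) and, where they
// differ from the decoded opcode, switches vdata/vaddr0 to a matching wider
// register starting at the same base VGPR. Worked example: a load with
// dmask == 0b0111 enables three channels, so DstSize == 3 and vdata is widened
// to a 3-dword register; with packed D16 those halves fit in
// (3 + 1) / 2 == 2 dwords instead.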
369cad7fa85SMatt Arsenault DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const { 370da4a7c01SDmitry Preobrazhensky 3710b4eb1eaSDmitry Preobrazhensky int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), 3720b4eb1eaSDmitry Preobrazhensky AMDGPU::OpName::vdst); 3730b4eb1eaSDmitry Preobrazhensky 374cad7fa85SMatt Arsenault int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), 375cad7fa85SMatt Arsenault AMDGPU::OpName::vdata); 376692560dcSStanislav Mekhanoshin int VAddr0Idx = 377692560dcSStanislav Mekhanoshin AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0); 378cad7fa85SMatt Arsenault int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), 379cad7fa85SMatt Arsenault AMDGPU::OpName::dmask); 3800b4eb1eaSDmitry Preobrazhensky 3810a1ff464SDmitry Preobrazhensky int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), 3820a1ff464SDmitry Preobrazhensky AMDGPU::OpName::tfe); 383f2674319SNicolai Haehnle int D16Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), 384f2674319SNicolai Haehnle AMDGPU::OpName::d16); 3850a1ff464SDmitry Preobrazhensky 3860b4eb1eaSDmitry Preobrazhensky assert(VDataIdx != -1); 3870b4eb1eaSDmitry Preobrazhensky assert(DMaskIdx != -1); 3880a1ff464SDmitry Preobrazhensky assert(TFEIdx != -1); 3890b4eb1eaSDmitry Preobrazhensky 390692560dcSStanislav Mekhanoshin const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode()); 391da4a7c01SDmitry Preobrazhensky bool IsAtomic = (VDstIdx != -1); 392f2674319SNicolai Haehnle bool IsGather4 = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::Gather4; 3930b4eb1eaSDmitry Preobrazhensky 394692560dcSStanislav Mekhanoshin bool IsNSA = false; 395692560dcSStanislav Mekhanoshin unsigned AddrSize = Info->VAddrDwords; 396cad7fa85SMatt Arsenault 397692560dcSStanislav Mekhanoshin if (STI.getFeatureBits()[AMDGPU::FeatureGFX10]) { 398692560dcSStanislav Mekhanoshin unsigned DimIdx = 399692560dcSStanislav Mekhanoshin AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dim); 400692560dcSStanislav Mekhanoshin const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 401692560dcSStanislav Mekhanoshin AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode); 402692560dcSStanislav Mekhanoshin const AMDGPU::MIMGDimInfo *Dim = 403692560dcSStanislav Mekhanoshin AMDGPU::getMIMGDimInfoByEncoding(MI.getOperand(DimIdx).getImm()); 404692560dcSStanislav Mekhanoshin 405692560dcSStanislav Mekhanoshin AddrSize = BaseOpcode->NumExtraArgs + 406692560dcSStanislav Mekhanoshin (BaseOpcode->Gradients ? Dim->NumGradients : 0) + 407692560dcSStanislav Mekhanoshin (BaseOpcode->Coordinates ? Dim->NumCoords : 0) + 408692560dcSStanislav Mekhanoshin (BaseOpcode->LodOrClampOrMip ? 1 : 0); 409692560dcSStanislav Mekhanoshin IsNSA = Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA; 410692560dcSStanislav Mekhanoshin if (!IsNSA) { 411692560dcSStanislav Mekhanoshin if (AddrSize > 8) 412692560dcSStanislav Mekhanoshin AddrSize = 16; 413692560dcSStanislav Mekhanoshin else if (AddrSize > 4) 414692560dcSStanislav Mekhanoshin AddrSize = 8; 415692560dcSStanislav Mekhanoshin } else { 416692560dcSStanislav Mekhanoshin if (AddrSize > Info->VAddrDwords) { 417692560dcSStanislav Mekhanoshin // The NSA encoding does not contain enough operands for the combination 418692560dcSStanislav Mekhanoshin // of base opcode / dimension. Should this be an error? 
4190a1ff464SDmitry Preobrazhensky return MCDisassembler::Success; 420692560dcSStanislav Mekhanoshin } 421692560dcSStanislav Mekhanoshin } 422692560dcSStanislav Mekhanoshin } 423692560dcSStanislav Mekhanoshin 424692560dcSStanislav Mekhanoshin unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf; 425692560dcSStanislav Mekhanoshin unsigned DstSize = IsGather4 ? 4 : std::max(countPopulation(DMask), 1u); 4260a1ff464SDmitry Preobrazhensky 427f2674319SNicolai Haehnle bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm(); 4280a1ff464SDmitry Preobrazhensky if (D16 && AMDGPU::hasPackedD16(STI)) { 4290a1ff464SDmitry Preobrazhensky DstSize = (DstSize + 1) / 2; 4300a1ff464SDmitry Preobrazhensky } 4310a1ff464SDmitry Preobrazhensky 4320a1ff464SDmitry Preobrazhensky // FIXME: Add tfe support 4330a1ff464SDmitry Preobrazhensky if (MI.getOperand(TFEIdx).getImm()) 434cad7fa85SMatt Arsenault return MCDisassembler::Success; 435cad7fa85SMatt Arsenault 436692560dcSStanislav Mekhanoshin if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords) 437f2674319SNicolai Haehnle return MCDisassembler::Success; 438692560dcSStanislav Mekhanoshin 439692560dcSStanislav Mekhanoshin int NewOpcode = 440692560dcSStanislav Mekhanoshin AMDGPU::getMIMGOpcode(Info->BaseOpcode, Info->MIMGEncoding, DstSize, AddrSize); 4410ab200b6SNicolai Haehnle if (NewOpcode == -1) 4420ab200b6SNicolai Haehnle return MCDisassembler::Success; 4430b4eb1eaSDmitry Preobrazhensky 444692560dcSStanislav Mekhanoshin // Widen the register to the correct number of enabled channels. 445692560dcSStanislav Mekhanoshin unsigned NewVdata = AMDGPU::NoRegister; 446692560dcSStanislav Mekhanoshin if (DstSize != Info->VDataDwords) { 447692560dcSStanislav Mekhanoshin auto DataRCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass; 448cad7fa85SMatt Arsenault 4490b4eb1eaSDmitry Preobrazhensky // Get first subregister of VData 450cad7fa85SMatt Arsenault unsigned Vdata0 = MI.getOperand(VDataIdx).getReg(); 4510b4eb1eaSDmitry Preobrazhensky unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0); 4520b4eb1eaSDmitry Preobrazhensky Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0; 4530b4eb1eaSDmitry Preobrazhensky 454692560dcSStanislav Mekhanoshin NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0, 455692560dcSStanislav Mekhanoshin &MRI.getRegClass(DataRCID)); 456cad7fa85SMatt Arsenault if (NewVdata == AMDGPU::NoRegister) { 457cad7fa85SMatt Arsenault // It's possible to encode this such that the low register + enabled 458cad7fa85SMatt Arsenault // components exceeds the register count. 459cad7fa85SMatt Arsenault return MCDisassembler::Success; 460cad7fa85SMatt Arsenault } 461692560dcSStanislav Mekhanoshin } 462692560dcSStanislav Mekhanoshin 463692560dcSStanislav Mekhanoshin unsigned NewVAddr0 = AMDGPU::NoRegister; 464692560dcSStanislav Mekhanoshin if (STI.getFeatureBits()[AMDGPU::FeatureGFX10] && !IsNSA && 465692560dcSStanislav Mekhanoshin AddrSize != Info->VAddrDwords) { 466692560dcSStanislav Mekhanoshin unsigned VAddr0 = MI.getOperand(VAddr0Idx).getReg(); 467692560dcSStanislav Mekhanoshin unsigned VAddrSub0 = MRI.getSubReg(VAddr0, AMDGPU::sub0); 468692560dcSStanislav Mekhanoshin VAddr0 = (VAddrSub0 != 0) ? 
VAddrSub0 : VAddr0; 469692560dcSStanislav Mekhanoshin 470692560dcSStanislav Mekhanoshin auto AddrRCID = MCII->get(NewOpcode).OpInfo[VAddr0Idx].RegClass; 471692560dcSStanislav Mekhanoshin NewVAddr0 = MRI.getMatchingSuperReg(VAddr0, AMDGPU::sub0, 472692560dcSStanislav Mekhanoshin &MRI.getRegClass(AddrRCID)); 473692560dcSStanislav Mekhanoshin if (NewVAddr0 == AMDGPU::NoRegister) 474692560dcSStanislav Mekhanoshin return MCDisassembler::Success; 475692560dcSStanislav Mekhanoshin } 476cad7fa85SMatt Arsenault 477cad7fa85SMatt Arsenault MI.setOpcode(NewOpcode); 478692560dcSStanislav Mekhanoshin 479692560dcSStanislav Mekhanoshin if (NewVdata != AMDGPU::NoRegister) { 480cad7fa85SMatt Arsenault MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata); 4810b4eb1eaSDmitry Preobrazhensky 482da4a7c01SDmitry Preobrazhensky if (IsAtomic) { 4830b4eb1eaSDmitry Preobrazhensky // Atomic operations have an additional operand (a copy of data) 4840b4eb1eaSDmitry Preobrazhensky MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata); 4850b4eb1eaSDmitry Preobrazhensky } 486692560dcSStanislav Mekhanoshin } 487692560dcSStanislav Mekhanoshin 488692560dcSStanislav Mekhanoshin if (NewVAddr0 != AMDGPU::NoRegister) { 489692560dcSStanislav Mekhanoshin MI.getOperand(VAddr0Idx) = MCOperand::createReg(NewVAddr0); 490692560dcSStanislav Mekhanoshin } else if (IsNSA) { 491692560dcSStanislav Mekhanoshin assert(AddrSize <= Info->VAddrDwords); 492692560dcSStanislav Mekhanoshin MI.erase(MI.begin() + VAddr0Idx + AddrSize, 493692560dcSStanislav Mekhanoshin MI.begin() + VAddr0Idx + Info->VAddrDwords); 494692560dcSStanislav Mekhanoshin } 4950b4eb1eaSDmitry Preobrazhensky 496cad7fa85SMatt Arsenault return MCDisassembler::Success; 497cad7fa85SMatt Arsenault } 498cad7fa85SMatt Arsenault 499ac106addSNikolay Haustov const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const { 500ac106addSNikolay Haustov return getContext().getRegisterInfo()-> 501ac106addSNikolay Haustov getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]); 502e1818af8STom Stellard } 503e1818af8STom Stellard 504ac106addSNikolay Haustov inline 505ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::errOperand(unsigned V, 506ac106addSNikolay Haustov const Twine& ErrMsg) const { 507ac106addSNikolay Haustov *CommentStream << "Error: " + ErrMsg; 508ac106addSNikolay Haustov 509ac106addSNikolay Haustov // ToDo: add support for error operands to MCInst.h 510ac106addSNikolay Haustov // return MCOperand::createError(V); 511ac106addSNikolay Haustov return MCOperand(); 512ac106addSNikolay Haustov } 513ac106addSNikolay Haustov 514ac106addSNikolay Haustov inline 515ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const { 516ac2b0264SDmitry Preobrazhensky return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI)); 517ac106addSNikolay Haustov } 518ac106addSNikolay Haustov 519ac106addSNikolay Haustov inline 520ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID, 521ac106addSNikolay Haustov unsigned Val) const { 522ac106addSNikolay Haustov const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID]; 523ac106addSNikolay Haustov if (Val >= RegCl.getNumRegs()) 524ac106addSNikolay Haustov return errOperand(Val, Twine(getRegClassName(RegClassID)) + 525ac106addSNikolay Haustov ": unknown register " + Twine(Val)); 526ac106addSNikolay Haustov return createRegOperand(RegCl.getRegister(Val)); 527ac106addSNikolay Haustov } 528ac106addSNikolay Haustov 529ac106addSNikolay Haustov inline 530ac106addSNikolay 
Haustov MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID, 531ac106addSNikolay Haustov unsigned Val) const { 532ac106addSNikolay Haustov // ToDo: SI/CI have 104 SGPRs, VI - 102 533ac106addSNikolay Haustov // Valery: here we accepting as much as we can, let assembler sort it out 534ac106addSNikolay Haustov int shift = 0; 535ac106addSNikolay Haustov switch (SRegClassID) { 536ac106addSNikolay Haustov case AMDGPU::SGPR_32RegClassID: 537212a251cSArtem Tamazov case AMDGPU::TTMP_32RegClassID: 538212a251cSArtem Tamazov break; 539ac106addSNikolay Haustov case AMDGPU::SGPR_64RegClassID: 540212a251cSArtem Tamazov case AMDGPU::TTMP_64RegClassID: 541212a251cSArtem Tamazov shift = 1; 542212a251cSArtem Tamazov break; 543212a251cSArtem Tamazov case AMDGPU::SGPR_128RegClassID: 544212a251cSArtem Tamazov case AMDGPU::TTMP_128RegClassID: 545ac106addSNikolay Haustov // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in 546ac106addSNikolay Haustov // this bundle? 54727134953SDmitry Preobrazhensky case AMDGPU::SGPR_256RegClassID: 54827134953SDmitry Preobrazhensky case AMDGPU::TTMP_256RegClassID: 549ac106addSNikolay Haustov // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in 550ac106addSNikolay Haustov // this bundle? 55127134953SDmitry Preobrazhensky case AMDGPU::SGPR_512RegClassID: 55227134953SDmitry Preobrazhensky case AMDGPU::TTMP_512RegClassID: 553212a251cSArtem Tamazov shift = 2; 554212a251cSArtem Tamazov break; 555ac106addSNikolay Haustov // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in 556ac106addSNikolay Haustov // this bundle? 557212a251cSArtem Tamazov default: 55892b355b1SMatt Arsenault llvm_unreachable("unhandled register class"); 559ac106addSNikolay Haustov } 56092b355b1SMatt Arsenault 56192b355b1SMatt Arsenault if (Val % (1 << shift)) { 562ac106addSNikolay Haustov *CommentStream << "Warning: " << getRegClassName(SRegClassID) 563ac106addSNikolay Haustov << ": scalar reg isn't aligned " << Val; 56492b355b1SMatt Arsenault } 56592b355b1SMatt Arsenault 566ac106addSNikolay Haustov return createRegOperand(SRegClassID, Val >> shift); 567ac106addSNikolay Haustov } 568ac106addSNikolay Haustov 569ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const { 570212a251cSArtem Tamazov return decodeSrcOp(OPW32, Val); 571ac106addSNikolay Haustov } 572ac106addSNikolay Haustov 573ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const { 574212a251cSArtem Tamazov return decodeSrcOp(OPW64, Val); 575ac106addSNikolay Haustov } 576ac106addSNikolay Haustov 57730fc5239SDmitry Preobrazhensky MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const { 57830fc5239SDmitry Preobrazhensky return decodeSrcOp(OPW128, Val); 57930fc5239SDmitry Preobrazhensky } 58030fc5239SDmitry Preobrazhensky 5814bd72361SMatt Arsenault MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const { 5824bd72361SMatt Arsenault return decodeSrcOp(OPW16, Val); 5834bd72361SMatt Arsenault } 5844bd72361SMatt Arsenault 5859be7b0d4SMatt Arsenault MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const { 5869be7b0d4SMatt Arsenault return decodeSrcOp(OPWV216, Val); 5879be7b0d4SMatt Arsenault } 5889be7b0d4SMatt Arsenault 589ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const { 590cb540bc0SMatt Arsenault // Some instructions have operand restrictions beyond what the encoding 591cb540bc0SMatt Arsenault // allows. 
Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
592cb540bc0SMatt Arsenault   // high bit.
593cb540bc0SMatt Arsenault   Val &= 255;
594cb540bc0SMatt Arsenault 
595ac106addSNikolay Haustov   return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
596ac106addSNikolay Haustov }
597ac106addSNikolay Haustov 
5986023d599SDmitry Preobrazhensky MCOperand AMDGPUDisassembler::decodeOperand_VRegOrLds_32(unsigned Val) const {
5996023d599SDmitry Preobrazhensky   return decodeSrcOp(OPW32, Val);
6006023d599SDmitry Preobrazhensky }
6016023d599SDmitry Preobrazhensky 
602ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
603ac106addSNikolay Haustov   return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
604ac106addSNikolay Haustov }
605ac106addSNikolay Haustov 
606ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
607ac106addSNikolay Haustov   return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
608ac106addSNikolay Haustov }
609ac106addSNikolay Haustov 
610ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
611ac106addSNikolay Haustov   return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
612ac106addSNikolay Haustov }
613ac106addSNikolay Haustov 
614ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
615ac106addSNikolay Haustov   // The table-gen generated disassembler doesn't care about operand types:
616ac106addSNikolay Haustov   // only the register class is kept, so an SSrc_32 operand turns into SReg_32,
617ac106addSNikolay Haustov   // and therefore we accept immediates and literals here as well.
618212a251cSArtem Tamazov   return decodeSrcOp(OPW32, Val);
619ac106addSNikolay Haustov }
620ac106addSNikolay Haustov 
621640c44b8SMatt Arsenault MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
622640c44b8SMatt Arsenault   unsigned Val) const {
623640c44b8SMatt Arsenault   // SReg_32_XM0_XEXEC is SReg_32 without M0 or EXEC_LO/EXEC_HI
62438e496b1SArtem Tamazov   return decodeOperand_SReg_32(Val);
62538e496b1SArtem Tamazov }
62638e496b1SArtem Tamazov 
627ca7b0a17SMatt Arsenault MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
628ca7b0a17SMatt Arsenault   unsigned Val) const {
629ca7b0a17SMatt Arsenault   // SReg_32_XEXEC_HI is SReg_32 without EXEC_HI
630ca7b0a17SMatt Arsenault   return decodeOperand_SReg_32(Val);
631ca7b0a17SMatt Arsenault }
632ca7b0a17SMatt Arsenault 
6336023d599SDmitry Preobrazhensky MCOperand AMDGPUDisassembler::decodeOperand_SRegOrLds_32(unsigned Val) const {
6346023d599SDmitry Preobrazhensky   // The table-gen generated disassembler doesn't care about operand types:
6356023d599SDmitry Preobrazhensky   // only the register class is kept, so an SSrc_32 operand turns into SReg_32,
6366023d599SDmitry Preobrazhensky   // and therefore we accept immediates and literals here as well.
6376023d599SDmitry Preobrazhensky   return decodeSrcOp(OPW32, Val);
6386023d599SDmitry Preobrazhensky }
6396023d599SDmitry Preobrazhensky 
640ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
641640c44b8SMatt Arsenault   return decodeSrcOp(OPW64, Val);
642640c44b8SMatt Arsenault }
643640c44b8SMatt Arsenault 
644640c44b8SMatt Arsenault MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
645212a251cSArtem Tamazov   return decodeSrcOp(OPW64, Val);
646ac106addSNikolay Haustov }
647ac106addSNikolay Haustov 
648ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val)
const { 649212a251cSArtem Tamazov return decodeSrcOp(OPW128, Val); 650ac106addSNikolay Haustov } 651ac106addSNikolay Haustov 652ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const { 65327134953SDmitry Preobrazhensky return decodeDstOp(OPW256, Val); 654ac106addSNikolay Haustov } 655ac106addSNikolay Haustov 656ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const { 65727134953SDmitry Preobrazhensky return decodeDstOp(OPW512, Val); 658ac106addSNikolay Haustov } 659ac106addSNikolay Haustov 660ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeLiteralConstant() const { 661ac106addSNikolay Haustov // For now all literal constants are supposed to be unsigned integer 662ac106addSNikolay Haustov // ToDo: deal with signed/unsigned 64-bit integer constants 663ac106addSNikolay Haustov // ToDo: deal with float/double constants 664ce941c9cSDmitry Preobrazhensky if (!HasLiteral) { 665ce941c9cSDmitry Preobrazhensky if (Bytes.size() < 4) { 666ac106addSNikolay Haustov return errOperand(0, "cannot read literal, inst bytes left " + 667ac106addSNikolay Haustov Twine(Bytes.size())); 668ce941c9cSDmitry Preobrazhensky } 669ce941c9cSDmitry Preobrazhensky HasLiteral = true; 670ce941c9cSDmitry Preobrazhensky Literal = eatBytes<uint32_t>(Bytes); 671ce941c9cSDmitry Preobrazhensky } 672ce941c9cSDmitry Preobrazhensky return MCOperand::createImm(Literal); 673ac106addSNikolay Haustov } 674ac106addSNikolay Haustov 675ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) { 676212a251cSArtem Tamazov using namespace AMDGPU::EncValues; 677c8fbf6ffSEugene Zelenko 678212a251cSArtem Tamazov assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX); 679212a251cSArtem Tamazov return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ? 680212a251cSArtem Tamazov (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) : 681212a251cSArtem Tamazov (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm))); 682212a251cSArtem Tamazov // Cast prevents negative overflow. 
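  // With the usual GCN encoding values (INLINE_INTEGER_C_MIN == 128,
  // INLINE_INTEGER_C_POSITIVE_MAX == 192, INLINE_INTEGER_C_MAX == 208) this
  // maps 128..192 to the inline integers 0..64 and 193..208 to -1..-16,
  // e.g. Imm == 193 decodes to -1.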
683ac106addSNikolay Haustov } 684ac106addSNikolay Haustov 6854bd72361SMatt Arsenault static int64_t getInlineImmVal32(unsigned Imm) { 6864bd72361SMatt Arsenault switch (Imm) { 6874bd72361SMatt Arsenault case 240: 6884bd72361SMatt Arsenault return FloatToBits(0.5f); 6894bd72361SMatt Arsenault case 241: 6904bd72361SMatt Arsenault return FloatToBits(-0.5f); 6914bd72361SMatt Arsenault case 242: 6924bd72361SMatt Arsenault return FloatToBits(1.0f); 6934bd72361SMatt Arsenault case 243: 6944bd72361SMatt Arsenault return FloatToBits(-1.0f); 6954bd72361SMatt Arsenault case 244: 6964bd72361SMatt Arsenault return FloatToBits(2.0f); 6974bd72361SMatt Arsenault case 245: 6984bd72361SMatt Arsenault return FloatToBits(-2.0f); 6994bd72361SMatt Arsenault case 246: 7004bd72361SMatt Arsenault return FloatToBits(4.0f); 7014bd72361SMatt Arsenault case 247: 7024bd72361SMatt Arsenault return FloatToBits(-4.0f); 7034bd72361SMatt Arsenault case 248: // 1 / (2 * PI) 7044bd72361SMatt Arsenault return 0x3e22f983; 7054bd72361SMatt Arsenault default: 7064bd72361SMatt Arsenault llvm_unreachable("invalid fp inline imm"); 7074bd72361SMatt Arsenault } 7084bd72361SMatt Arsenault } 7094bd72361SMatt Arsenault 7104bd72361SMatt Arsenault static int64_t getInlineImmVal64(unsigned Imm) { 7114bd72361SMatt Arsenault switch (Imm) { 7124bd72361SMatt Arsenault case 240: 7134bd72361SMatt Arsenault return DoubleToBits(0.5); 7144bd72361SMatt Arsenault case 241: 7154bd72361SMatt Arsenault return DoubleToBits(-0.5); 7164bd72361SMatt Arsenault case 242: 7174bd72361SMatt Arsenault return DoubleToBits(1.0); 7184bd72361SMatt Arsenault case 243: 7194bd72361SMatt Arsenault return DoubleToBits(-1.0); 7204bd72361SMatt Arsenault case 244: 7214bd72361SMatt Arsenault return DoubleToBits(2.0); 7224bd72361SMatt Arsenault case 245: 7234bd72361SMatt Arsenault return DoubleToBits(-2.0); 7244bd72361SMatt Arsenault case 246: 7254bd72361SMatt Arsenault return DoubleToBits(4.0); 7264bd72361SMatt Arsenault case 247: 7274bd72361SMatt Arsenault return DoubleToBits(-4.0); 7284bd72361SMatt Arsenault case 248: // 1 / (2 * PI) 7294bd72361SMatt Arsenault return 0x3fc45f306dc9c882; 7304bd72361SMatt Arsenault default: 7314bd72361SMatt Arsenault llvm_unreachable("invalid fp inline imm"); 7324bd72361SMatt Arsenault } 7334bd72361SMatt Arsenault } 7344bd72361SMatt Arsenault 7354bd72361SMatt Arsenault static int64_t getInlineImmVal16(unsigned Imm) { 7364bd72361SMatt Arsenault switch (Imm) { 7374bd72361SMatt Arsenault case 240: 7384bd72361SMatt Arsenault return 0x3800; 7394bd72361SMatt Arsenault case 241: 7404bd72361SMatt Arsenault return 0xB800; 7414bd72361SMatt Arsenault case 242: 7424bd72361SMatt Arsenault return 0x3C00; 7434bd72361SMatt Arsenault case 243: 7444bd72361SMatt Arsenault return 0xBC00; 7454bd72361SMatt Arsenault case 244: 7464bd72361SMatt Arsenault return 0x4000; 7474bd72361SMatt Arsenault case 245: 7484bd72361SMatt Arsenault return 0xC000; 7494bd72361SMatt Arsenault case 246: 7504bd72361SMatt Arsenault return 0x4400; 7514bd72361SMatt Arsenault case 247: 7524bd72361SMatt Arsenault return 0xC400; 7534bd72361SMatt Arsenault case 248: // 1 / (2 * PI) 7544bd72361SMatt Arsenault return 0x3118; 7554bd72361SMatt Arsenault default: 7564bd72361SMatt Arsenault llvm_unreachable("invalid fp inline imm"); 7574bd72361SMatt Arsenault } 7584bd72361SMatt Arsenault } 7594bd72361SMatt Arsenault 7604bd72361SMatt Arsenault MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) { 761212a251cSArtem Tamazov assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN 
762212a251cSArtem Tamazov && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX); 7634bd72361SMatt Arsenault 764e1818af8STom Stellard // ToDo: case 248: 1/(2*PI) - is allowed only on VI 7654bd72361SMatt Arsenault switch (Width) { 7664bd72361SMatt Arsenault case OPW32: 7674bd72361SMatt Arsenault return MCOperand::createImm(getInlineImmVal32(Imm)); 7684bd72361SMatt Arsenault case OPW64: 7694bd72361SMatt Arsenault return MCOperand::createImm(getInlineImmVal64(Imm)); 7704bd72361SMatt Arsenault case OPW16: 7719be7b0d4SMatt Arsenault case OPWV216: 7724bd72361SMatt Arsenault return MCOperand::createImm(getInlineImmVal16(Imm)); 7734bd72361SMatt Arsenault default: 7744bd72361SMatt Arsenault llvm_unreachable("implement me"); 775e1818af8STom Stellard } 776e1818af8STom Stellard } 777e1818af8STom Stellard 778212a251cSArtem Tamazov unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const { 779e1818af8STom Stellard using namespace AMDGPU; 780c8fbf6ffSEugene Zelenko 781212a251cSArtem Tamazov assert(OPW_FIRST_ <= Width && Width < OPW_LAST_); 782212a251cSArtem Tamazov switch (Width) { 783212a251cSArtem Tamazov default: // fall 7844bd72361SMatt Arsenault case OPW32: 7854bd72361SMatt Arsenault case OPW16: 7869be7b0d4SMatt Arsenault case OPWV216: 7874bd72361SMatt Arsenault return VGPR_32RegClassID; 788212a251cSArtem Tamazov case OPW64: return VReg_64RegClassID; 789212a251cSArtem Tamazov case OPW128: return VReg_128RegClassID; 790212a251cSArtem Tamazov } 791212a251cSArtem Tamazov } 792212a251cSArtem Tamazov 793212a251cSArtem Tamazov unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const { 794212a251cSArtem Tamazov using namespace AMDGPU; 795c8fbf6ffSEugene Zelenko 796212a251cSArtem Tamazov assert(OPW_FIRST_ <= Width && Width < OPW_LAST_); 797212a251cSArtem Tamazov switch (Width) { 798212a251cSArtem Tamazov default: // fall 7994bd72361SMatt Arsenault case OPW32: 8004bd72361SMatt Arsenault case OPW16: 8019be7b0d4SMatt Arsenault case OPWV216: 8024bd72361SMatt Arsenault return SGPR_32RegClassID; 803212a251cSArtem Tamazov case OPW64: return SGPR_64RegClassID; 804212a251cSArtem Tamazov case OPW128: return SGPR_128RegClassID; 80527134953SDmitry Preobrazhensky case OPW256: return SGPR_256RegClassID; 80627134953SDmitry Preobrazhensky case OPW512: return SGPR_512RegClassID; 807212a251cSArtem Tamazov } 808212a251cSArtem Tamazov } 809212a251cSArtem Tamazov 810212a251cSArtem Tamazov unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const { 811212a251cSArtem Tamazov using namespace AMDGPU; 812c8fbf6ffSEugene Zelenko 813212a251cSArtem Tamazov assert(OPW_FIRST_ <= Width && Width < OPW_LAST_); 814212a251cSArtem Tamazov switch (Width) { 815212a251cSArtem Tamazov default: // fall 8164bd72361SMatt Arsenault case OPW32: 8174bd72361SMatt Arsenault case OPW16: 8189be7b0d4SMatt Arsenault case OPWV216: 8194bd72361SMatt Arsenault return TTMP_32RegClassID; 820212a251cSArtem Tamazov case OPW64: return TTMP_64RegClassID; 821212a251cSArtem Tamazov case OPW128: return TTMP_128RegClassID; 82227134953SDmitry Preobrazhensky case OPW256: return TTMP_256RegClassID; 82327134953SDmitry Preobrazhensky case OPW512: return TTMP_512RegClassID; 824212a251cSArtem Tamazov } 825212a251cSArtem Tamazov } 826212a251cSArtem Tamazov 827ac2b0264SDmitry Preobrazhensky int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const { 828ac2b0264SDmitry Preobrazhensky using namespace AMDGPU::EncValues; 829ac2b0264SDmitry Preobrazhensky 83033d806a5SStanislav Mekhanoshin unsigned TTmpMin = 83133d806a5SStanislav Mekhanoshin 
(isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MIN : TTMP_VI_MIN; 83233d806a5SStanislav Mekhanoshin unsigned TTmpMax = 83333d806a5SStanislav Mekhanoshin (isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MAX : TTMP_VI_MAX; 834ac2b0264SDmitry Preobrazhensky 835ac2b0264SDmitry Preobrazhensky return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1; 836ac2b0264SDmitry Preobrazhensky } 837ac2b0264SDmitry Preobrazhensky 838212a251cSArtem Tamazov MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const { 839212a251cSArtem Tamazov using namespace AMDGPU::EncValues; 840c8fbf6ffSEugene Zelenko 841ac106addSNikolay Haustov assert(Val < 512); // enum9 842ac106addSNikolay Haustov 843212a251cSArtem Tamazov if (VGPR_MIN <= Val && Val <= VGPR_MAX) { 844212a251cSArtem Tamazov return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN); 845212a251cSArtem Tamazov } 846b49c3361SArtem Tamazov if (Val <= SGPR_MAX) { 847b49c3361SArtem Tamazov assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning. 848212a251cSArtem Tamazov return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN); 849212a251cSArtem Tamazov } 850ac2b0264SDmitry Preobrazhensky 851ac2b0264SDmitry Preobrazhensky int TTmpIdx = getTTmpIdx(Val); 852ac2b0264SDmitry Preobrazhensky if (TTmpIdx >= 0) { 853ac2b0264SDmitry Preobrazhensky return createSRegOperand(getTtmpClassId(Width), TTmpIdx); 854212a251cSArtem Tamazov } 855ac106addSNikolay Haustov 856212a251cSArtem Tamazov if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX) 857ac106addSNikolay Haustov return decodeIntImmed(Val); 858ac106addSNikolay Haustov 859212a251cSArtem Tamazov if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX) 8604bd72361SMatt Arsenault return decodeFPImmed(Width, Val); 861ac106addSNikolay Haustov 862212a251cSArtem Tamazov if (Val == LITERAL_CONST) 863ac106addSNikolay Haustov return decodeLiteralConstant(); 864ac106addSNikolay Haustov 8654bd72361SMatt Arsenault switch (Width) { 8664bd72361SMatt Arsenault case OPW32: 8674bd72361SMatt Arsenault case OPW16: 8689be7b0d4SMatt Arsenault case OPWV216: 8694bd72361SMatt Arsenault return decodeSpecialReg32(Val); 8704bd72361SMatt Arsenault case OPW64: 8714bd72361SMatt Arsenault return decodeSpecialReg64(Val); 8724bd72361SMatt Arsenault default: 8734bd72361SMatt Arsenault llvm_unreachable("unexpected immediate type"); 8744bd72361SMatt Arsenault } 875ac106addSNikolay Haustov } 876ac106addSNikolay Haustov 87727134953SDmitry Preobrazhensky MCOperand AMDGPUDisassembler::decodeDstOp(const OpWidthTy Width, unsigned Val) const { 87827134953SDmitry Preobrazhensky using namespace AMDGPU::EncValues; 87927134953SDmitry Preobrazhensky 88027134953SDmitry Preobrazhensky assert(Val < 128); 88127134953SDmitry Preobrazhensky assert(Width == OPW256 || Width == OPW512); 88227134953SDmitry Preobrazhensky 88327134953SDmitry Preobrazhensky if (Val <= SGPR_MAX) { 88427134953SDmitry Preobrazhensky assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning. 
88527134953SDmitry Preobrazhensky return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN); 88627134953SDmitry Preobrazhensky } 88727134953SDmitry Preobrazhensky 88827134953SDmitry Preobrazhensky int TTmpIdx = getTTmpIdx(Val); 88927134953SDmitry Preobrazhensky if (TTmpIdx >= 0) { 89027134953SDmitry Preobrazhensky return createSRegOperand(getTtmpClassId(Width), TTmpIdx); 89127134953SDmitry Preobrazhensky } 89227134953SDmitry Preobrazhensky 89327134953SDmitry Preobrazhensky llvm_unreachable("unknown dst register"); 89427134953SDmitry Preobrazhensky } 89527134953SDmitry Preobrazhensky 896ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const { 897ac106addSNikolay Haustov using namespace AMDGPU; 898c8fbf6ffSEugene Zelenko 899e1818af8STom Stellard switch (Val) { 900ac2b0264SDmitry Preobrazhensky case 102: return createRegOperand(FLAT_SCR_LO); 901ac2b0264SDmitry Preobrazhensky case 103: return createRegOperand(FLAT_SCR_HI); 9023afbd825SDmitry Preobrazhensky case 104: return createRegOperand(XNACK_MASK_LO); 9033afbd825SDmitry Preobrazhensky case 105: return createRegOperand(XNACK_MASK_HI); 904ac106addSNikolay Haustov case 106: return createRegOperand(VCC_LO); 905ac106addSNikolay Haustov case 107: return createRegOperand(VCC_HI); 906137976faSDmitry Preobrazhensky case 108: return createRegOperand(TBA_LO); 907137976faSDmitry Preobrazhensky case 109: return createRegOperand(TBA_HI); 908137976faSDmitry Preobrazhensky case 110: return createRegOperand(TMA_LO); 909137976faSDmitry Preobrazhensky case 111: return createRegOperand(TMA_HI); 910ac106addSNikolay Haustov case 124: return createRegOperand(M0); 91133d806a5SStanislav Mekhanoshin case 125: return createRegOperand(SGPR_NULL); 912ac106addSNikolay Haustov case 126: return createRegOperand(EXEC_LO); 913ac106addSNikolay Haustov case 127: return createRegOperand(EXEC_HI); 914a3b3b489SMatt Arsenault case 235: return createRegOperand(SRC_SHARED_BASE); 915a3b3b489SMatt Arsenault case 236: return createRegOperand(SRC_SHARED_LIMIT); 916a3b3b489SMatt Arsenault case 237: return createRegOperand(SRC_PRIVATE_BASE); 917a3b3b489SMatt Arsenault case 238: return createRegOperand(SRC_PRIVATE_LIMIT); 918137976faSDmitry Preobrazhensky case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID); 919*9111f35fSDmitry Preobrazhensky case 251: return createRegOperand(SRC_VCCZ); 920*9111f35fSDmitry Preobrazhensky case 252: return createRegOperand(SRC_EXECZ); 921*9111f35fSDmitry Preobrazhensky case 253: return createRegOperand(SRC_SCC); 922942c273dSDmitry Preobrazhensky case 254: return createRegOperand(LDS_DIRECT); 923ac106addSNikolay Haustov default: break; 924e1818af8STom Stellard } 925ac106addSNikolay Haustov return errOperand(Val, "unknown operand encoding " + Twine(Val)); 926e1818af8STom Stellard } 927e1818af8STom Stellard 928ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const { 929161a158eSNikolay Haustov using namespace AMDGPU; 930c8fbf6ffSEugene Zelenko 931161a158eSNikolay Haustov switch (Val) { 932ac2b0264SDmitry Preobrazhensky case 102: return createRegOperand(FLAT_SCR); 9333afbd825SDmitry Preobrazhensky case 104: return createRegOperand(XNACK_MASK); 934ac106addSNikolay Haustov case 106: return createRegOperand(VCC); 935137976faSDmitry Preobrazhensky case 108: return createRegOperand(TBA); 936137976faSDmitry Preobrazhensky case 110: return createRegOperand(TMA); 937ac106addSNikolay Haustov case 126: return createRegOperand(EXEC); 938137976faSDmitry Preobrazhensky case 235: 
return createRegOperand(SRC_SHARED_BASE); 939137976faSDmitry Preobrazhensky case 236: return createRegOperand(SRC_SHARED_LIMIT); 940137976faSDmitry Preobrazhensky case 237: return createRegOperand(SRC_PRIVATE_BASE); 941137976faSDmitry Preobrazhensky case 238: return createRegOperand(SRC_PRIVATE_LIMIT); 942137976faSDmitry Preobrazhensky case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID); 943*9111f35fSDmitry Preobrazhensky case 251: return createRegOperand(SRC_VCCZ); 944*9111f35fSDmitry Preobrazhensky case 252: return createRegOperand(SRC_EXECZ); 945*9111f35fSDmitry Preobrazhensky case 253: return createRegOperand(SRC_SCC); 946ac106addSNikolay Haustov default: break; 947161a158eSNikolay Haustov } 948ac106addSNikolay Haustov return errOperand(Val, "unknown operand encoding " + Twine(Val)); 949161a158eSNikolay Haustov } 950161a158eSNikolay Haustov 951549c89d2SSam Kolton MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width, 9526b65f7c3SDmitry Preobrazhensky const unsigned Val) const { 953363f47a2SSam Kolton using namespace AMDGPU::SDWA; 9546b65f7c3SDmitry Preobrazhensky using namespace AMDGPU::EncValues; 955363f47a2SSam Kolton 95633d806a5SStanislav Mekhanoshin if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] || 95733d806a5SStanislav Mekhanoshin STI.getFeatureBits()[AMDGPU::FeatureGFX10]) { 958da644c02SStanislav Mekhanoshin // XXX: cast to int is needed to avoid stupid warning: 959a179d25bSSam Kolton // compare with unsigned is always true 960da644c02SStanislav Mekhanoshin if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) && 961363f47a2SSam Kolton Val <= SDWA9EncValues::SRC_VGPR_MAX) { 962363f47a2SSam Kolton return createRegOperand(getVgprClassId(Width), 963363f47a2SSam Kolton Val - SDWA9EncValues::SRC_VGPR_MIN); 964363f47a2SSam Kolton } 965363f47a2SSam Kolton if (SDWA9EncValues::SRC_SGPR_MIN <= Val && 96633d806a5SStanislav Mekhanoshin Val <= (isGFX10() ? 
SDWA9EncValues::SRC_SGPR_MAX_GFX10 96733d806a5SStanislav Mekhanoshin : SDWA9EncValues::SRC_SGPR_MAX_SI)) { 968363f47a2SSam Kolton return createSRegOperand(getSgprClassId(Width), 969363f47a2SSam Kolton Val - SDWA9EncValues::SRC_SGPR_MIN); 970363f47a2SSam Kolton } 971ac2b0264SDmitry Preobrazhensky if (SDWA9EncValues::SRC_TTMP_MIN <= Val && 972ac2b0264SDmitry Preobrazhensky Val <= SDWA9EncValues::SRC_TTMP_MAX) { 973ac2b0264SDmitry Preobrazhensky return createSRegOperand(getTtmpClassId(Width), 974ac2b0264SDmitry Preobrazhensky Val - SDWA9EncValues::SRC_TTMP_MIN); 975ac2b0264SDmitry Preobrazhensky } 976363f47a2SSam Kolton 9776b65f7c3SDmitry Preobrazhensky const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN; 9786b65f7c3SDmitry Preobrazhensky 9796b65f7c3SDmitry Preobrazhensky if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX) 9806b65f7c3SDmitry Preobrazhensky return decodeIntImmed(SVal); 9816b65f7c3SDmitry Preobrazhensky 9826b65f7c3SDmitry Preobrazhensky if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX) 9836b65f7c3SDmitry Preobrazhensky return decodeFPImmed(Width, SVal); 9846b65f7c3SDmitry Preobrazhensky 9856b65f7c3SDmitry Preobrazhensky return decodeSpecialReg32(SVal); 986549c89d2SSam Kolton } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) { 987549c89d2SSam Kolton return createRegOperand(getVgprClassId(Width), Val); 988549c89d2SSam Kolton } 989549c89d2SSam Kolton llvm_unreachable("unsupported target"); 990363f47a2SSam Kolton } 991363f47a2SSam Kolton 992549c89d2SSam Kolton MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const { 993549c89d2SSam Kolton return decodeSDWASrc(OPW16, Val); 994363f47a2SSam Kolton } 995363f47a2SSam Kolton 996549c89d2SSam Kolton MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const { 997549c89d2SSam Kolton return decodeSDWASrc(OPW32, Val); 998363f47a2SSam Kolton } 999363f47a2SSam Kolton 1000549c89d2SSam Kolton MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const { 1001363f47a2SSam Kolton using namespace AMDGPU::SDWA; 1002363f47a2SSam Kolton 100333d806a5SStanislav Mekhanoshin assert((STI.getFeatureBits()[AMDGPU::FeatureGFX9] || 100433d806a5SStanislav Mekhanoshin STI.getFeatureBits()[AMDGPU::FeatureGFX10]) && 100533d806a5SStanislav Mekhanoshin "SDWAVopcDst should be present only on GFX9+"); 100633d806a5SStanislav Mekhanoshin 1007363f47a2SSam Kolton if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) { 1008363f47a2SSam Kolton Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK; 1009ac2b0264SDmitry Preobrazhensky 1010ac2b0264SDmitry Preobrazhensky int TTmpIdx = getTTmpIdx(Val); 1011ac2b0264SDmitry Preobrazhensky if (TTmpIdx >= 0) { 1012ac2b0264SDmitry Preobrazhensky return createSRegOperand(getTtmpClassId(OPW64), TTmpIdx); 101333d806a5SStanislav Mekhanoshin } else if (Val > SGPR_MAX) { 1014363f47a2SSam Kolton return decodeSpecialReg64(Val); 1015363f47a2SSam Kolton } else { 1016363f47a2SSam Kolton return createSRegOperand(getSgprClassId(OPW64), Val); 1017363f47a2SSam Kolton } 1018363f47a2SSam Kolton } else { 1019363f47a2SSam Kolton return createRegOperand(AMDGPU::VCC); 1020363f47a2SSam Kolton } 1021363f47a2SSam Kolton } 1022363f47a2SSam Kolton 1023ac2b0264SDmitry Preobrazhensky bool AMDGPUDisassembler::isVI() const { 1024ac2b0264SDmitry Preobrazhensky return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]; 1025ac2b0264SDmitry Preobrazhensky } 1026ac2b0264SDmitry Preobrazhensky 1027ac2b0264SDmitry Preobrazhensky bool AMDGPUDisassembler::isGFX9() const { 1028ac2b0264SDmitry Preobrazhensky return 
1029ac2b0264SDmitry Preobrazhensky }
1030ac2b0264SDmitry Preobrazhensky
103133d806a5SStanislav Mekhanoshin bool AMDGPUDisassembler::isGFX10() const {
103233d806a5SStanislav Mekhanoshin   return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
103333d806a5SStanislav Mekhanoshin }
103433d806a5SStanislav Mekhanoshin
10353381d7a2SSam Kolton //===----------------------------------------------------------------------===//
10363381d7a2SSam Kolton // AMDGPUSymbolizer
10373381d7a2SSam Kolton //===----------------------------------------------------------------------===//
10383381d7a2SSam Kolton
10393381d7a2SSam Kolton // Try to find symbol name for specified label
10403381d7a2SSam Kolton bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
10413381d7a2SSam Kolton                                 raw_ostream &/*cStream*/, int64_t Value,
10423381d7a2SSam Kolton                                 uint64_t /*Address*/, bool IsBranch,
10433381d7a2SSam Kolton                                 uint64_t /*Offset*/, uint64_t /*InstSize*/) {
1044c8fbf6ffSEugene Zelenko   using SymbolInfoTy = std::tuple<uint64_t, StringRef, uint8_t>;
1045c8fbf6ffSEugene Zelenko   using SectionSymbolsTy = std::vector<SymbolInfoTy>;
10463381d7a2SSam Kolton
10473381d7a2SSam Kolton   if (!IsBranch) {
10483381d7a2SSam Kolton     return false;
10493381d7a2SSam Kolton   }
10503381d7a2SSam Kolton
10513381d7a2SSam Kolton   auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
1052b1c3b22bSNicolai Haehnle   if (!Symbols)
1053b1c3b22bSNicolai Haehnle     return false;
1054b1c3b22bSNicolai Haehnle
10553381d7a2SSam Kolton   auto Result = std::find_if(Symbols->begin(), Symbols->end(),
10563381d7a2SSam Kolton                              [Value](const SymbolInfoTy& Val) {
10573381d7a2SSam Kolton                                return std::get<0>(Val) == static_cast<uint64_t>(Value)
10583381d7a2SSam Kolton                                    && std::get<2>(Val) == ELF::STT_NOTYPE;
10593381d7a2SSam Kolton                              });
10603381d7a2SSam Kolton   if (Result != Symbols->end()) {
10613381d7a2SSam Kolton     auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
10623381d7a2SSam Kolton     const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
10633381d7a2SSam Kolton     Inst.addOperand(MCOperand::createExpr(Add));
10643381d7a2SSam Kolton     return true;
10653381d7a2SSam Kolton   }
10663381d7a2SSam Kolton   return false;
10673381d7a2SSam Kolton }
10683381d7a2SSam Kolton
106992b355b1SMatt Arsenault void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
107092b355b1SMatt Arsenault                                                        int64_t Value,
107192b355b1SMatt Arsenault                                                        uint64_t Address) {
107292b355b1SMatt Arsenault   llvm_unreachable("unimplemented");
107392b355b1SMatt Arsenault }
107492b355b1SMatt Arsenault
10753381d7a2SSam Kolton //===----------------------------------------------------------------------===//
10763381d7a2SSam Kolton // Initialization
10773381d7a2SSam Kolton //===----------------------------------------------------------------------===//
10783381d7a2SSam Kolton
10793381d7a2SSam Kolton static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
10803381d7a2SSam Kolton                               LLVMOpInfoCallback /*GetOpInfo*/,
10813381d7a2SSam Kolton                               LLVMSymbolLookupCallback /*SymbolLookUp*/,
10823381d7a2SSam Kolton                               void *DisInfo,
10833381d7a2SSam Kolton                               MCContext *Ctx,
10843381d7a2SSam Kolton                               std::unique_ptr<MCRelocationInfo> &&RelInfo) {
10853381d7a2SSam Kolton   return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
10863381d7a2SSam Kolton }
10873381d7a2SSam Kolton
1088e1818af8STom Stellard static MCDisassembler *createAMDGPUDisassembler(const Target &T,
1089e1818af8STom Stellard                                                 const MCSubtargetInfo &STI,
1090e1818af8STom Stellard                                                 MCContext &Ctx) {
1091cad7fa85SMatt Arsenault   return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
1092e1818af8STom Stellard }
1093e1818af8STom Stellard
1094e1818af8STom Stellard extern "C" void LLVMInitializeAMDGPUDisassembler() {
1095f42454b9SMehdi Amini   TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
1096f42454b9SMehdi Amini                                          createAMDGPUDisassembler);
1097f42454b9SMehdi Amini   TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
1098f42454b9SMehdi Amini                                        createAMDGPUSymbolizer);
1099e1818af8STom Stellard }
1100
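Usage sketch (not part of the file above): a minimal, hypothetical standalone driver showing how the disassembler and symbolizer registered in LLVMInitializeAMDGPUDisassembler() can be reached from client code through the C binding declared in llvm-c/Disassembler.h. The triple ("amdgcn--amdhsa"), the CPU name ("gfx900"), and the instruction bytes (assumed to be the little-endian GFX9 encoding of s_endpgm, 0xBF810000) are illustrative assumptions, not values taken from this file.

// Hypothetical driver for the AMDGPU MC disassembler via the LLVM-C API.
// Triple, CPU, and the instruction bytes below are assumptions for
// illustration only.
#include "llvm-c/Disassembler.h"
#include "llvm-c/Target.h"
#include <cstdint>
#include <cstdio>

int main() {
  // Register target info, MC components, and the disassembler; the last call
  // is the extern "C" entry point defined at the end of the file above.
  LLVMInitializeAMDGPUTargetInfo();
  LLVMInitializeAMDGPUTargetMC();
  LLVMInitializeAMDGPUDisassembler();

  LLVMDisasmContextRef DC =
      LLVMCreateDisasmCPU("amdgcn--amdhsa", "gfx900", /*DisInfo=*/nullptr,
                          /*TagType=*/0, /*GetOpInfo=*/nullptr,
                          /*SymbolLookUp=*/nullptr);
  if (!DC)
    return 1;

  // Assumed GFX9 encoding of s_endpgm (0xBF810000), stored little-endian.
  uint8_t Bytes[] = {0x00, 0x00, 0x81, 0xBF};
  char Text[128];
  size_t Size = LLVMDisasmInstruction(DC, Bytes, sizeof(Bytes), /*PC=*/0,
                                      Text, sizeof(Text));
  if (Size != 0)
    std::printf("%zu bytes:%s\n", Size, Text);

  LLVMDisasmDispose(DC);
  return 0;
}

Built as a standalone tool linked against the usual LLVM MC and Disassembler libraries (e.g. via llvm-config --libs), this prints the textual form of the decoded instruction, exercising the createAMDGPUDisassembler path registered above.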