1c8fbf6ffSEugene Zelenko //===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===// 2e1818af8STom Stellard // 3e1818af8STom Stellard // The LLVM Compiler Infrastructure 4e1818af8STom Stellard // 5e1818af8STom Stellard // This file is distributed under the University of Illinois Open Source 6e1818af8STom Stellard // License. See LICENSE.TXT for details. 7e1818af8STom Stellard // 8e1818af8STom Stellard //===----------------------------------------------------------------------===// 9e1818af8STom Stellard // 10e1818af8STom Stellard //===----------------------------------------------------------------------===// 11e1818af8STom Stellard // 12e1818af8STom Stellard /// \file 13e1818af8STom Stellard /// 14e1818af8STom Stellard /// This file contains definition for AMDGPU ISA disassembler 15e1818af8STom Stellard // 16e1818af8STom Stellard //===----------------------------------------------------------------------===// 17e1818af8STom Stellard 18e1818af8STom Stellard // ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)? 
#include "Disassembler/AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm-c/Disassembler.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

using DecodeStatus = llvm::MCDisassembler::DecodeStatus;

/// Append \p Opnd to \p Inst. Returns Success for a valid operand and
/// SoftFail when the operand is invalid (e.g. produced by errOperand()).
inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

/// Insert \p Op into \p MI at the position the named operand \p NameIdx
/// occupies for this opcode. Returns that operand index, or -1 (and inserts
/// nothing) if the opcode has no such named operand.
static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                uint16_t NameIdx) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
  if (OpIdx != -1) {
    auto I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}

// Decode a SOPP branch target: Imm is a signed word offset (hence the
// 18-bit APInt built from Imm * 4), taken relative to the address of the
// following instruction (Addr + 4). Falls back to the raw immediate when
// no symbolic operand can be attached.
static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

// Adapter used by the table-gen'd decoder: forwards the raw encoding to the
// corresponding AMDGPUDisassembler member decoder and appends the result.
#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                      unsigned Imm, \
                                      uint64_t /*Addr*/, \
                                      const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

// Shorthand: DecodeFooRegisterClass -> decodeOperand_Foo.
#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)

DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)

// 16-bit VSrc operand: decoded through the generic source-operand path.
static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

// Packed v2x16 VSrc operand.
static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

// SDWA operand adapters share the member decoder's name.
#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

// Consume sizeof(T) bytes from the front of Bytes and return them decoded as
// a little-endian value; Bytes is advanced past the consumed data.
template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

// Attempt to decode Inst against one decoder table. On failure the Bytes
// stream is restored, so literal dwords eaten by operand decoders during a
// failed attempt are not lost for the next table.
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false; // invalidate the literal cache for this attempt
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

// Top-level entry point: tries the 64-bit DPP/SDWA tables first (they
// conflict with VOP1/VOP2 encodings), then 32-bit tables, then the remaining
// 64-bit tables, applying post-decode fixups (MAC src2_modifiers, MIMG, SDWA)
// on success. Size is set to the number of bytes consumed, or 0 on failure.
DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;
  bool IsSDWA = false;

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding])
    report_fatal_error("Disassembly not yet supported for subtarget");

  // An instruction plus a trailing literal is at most 8 bytes.
  const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: better to switch encoding length using some bit predicate
    // but it is unknown yet, so try all we can

    // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
    // encodings
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) { IsSDWA = true;  break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) { IsSDWA = true;  break; }

      if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {
        Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address);
        if (Res) break;
      }
    }

    // Reinitialize Bytes as DPP64 could have eaten too much
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try decode 32-bit instruction
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address);
    if (Res) break;

    // Fall back to 64-bit encodings built from the two dwords.
    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
  } while (false);

  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi)) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
    Res = convertMIMGInst(MI);
  }

  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);

  Size = Res ? (MaxInstBytesNum - Bytes.size()) : 0;
  return Res;
}

// Post-decode fixup for SDWA instructions: the generated decoder cannot
// express the operands that depend on the subtarget, so they are inserted
// here (clamp on GFX9 VOPC, VCC sdst / omod on VI).
DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}

// Note that MIMG format provides no information about VADDR size.
// Consequently, decoded instructions always show address
// as if it has 1 dword, which could be not really so.
//
// Post-decode fixup for MIMG: the encoding does not carry the vdata width,
// so the generated decoder always produces a 1-dword vdata. Recompute the
// real width from dmask (and D16 packing) and rewrite the opcode plus the
// vdata (and, for atomics, vdst) register to the matching wider class.
DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {

  // Gather4 always writes 4 dwords; nothing to fix up.
  if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::Gather4) {
    return MCDisassembler::Success;
  }

  int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdst);

  int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdata);

  int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::dmask);

  int TFEIdx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::tfe);

  assert(VDataIdx != -1);
  assert(DMaskIdx != -1);
  assert(TFEIdx != -1);

  // Only atomic MIMG instructions have a separate vdst operand.
  bool IsAtomic = (VDstIdx != -1);

  unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
  if (DMask == 0)
    return MCDisassembler::Success;

  // One dword per enabled dmask channel; single-channel needs no rewrite.
  unsigned DstSize = countPopulation(DMask);
  if (DstSize == 1)
    return MCDisassembler::Success;

  // Packed D16 stores two 16-bit channels per dword.
  bool D16 = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::D16;
  if (D16 && AMDGPU::hasPackedD16(STI)) {
    DstSize = (DstSize + 1) / 2;
  }

  // FIXME: Add tfe support
  if (MI.getOperand(TFEIdx).getImm())
    return MCDisassembler::Success;

  int NewOpcode = -1;

  if (IsAtomic) {
    if (DMask == 0x1 || DMask == 0x3 || DMask == 0xF) {
      NewOpcode = AMDGPU::getMaskedMIMGAtomicOp(*MCII, MI.getOpcode(), DstSize);
    }
    // Unsupported atomic dmask: leave the instruction as decoded.
    if (NewOpcode == -1) return MCDisassembler::Success;
  } else {
    NewOpcode = AMDGPU::getMaskedMIMGOp(*MCII, MI.getOpcode(), DstSize);
    assert(NewOpcode != -1 && "could not find matching mimg channel instruction");
  }

  auto RCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;

  // Get first subregister of VData
  unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
  unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
  Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;

  // Widen the register to the correct number of enabled channels.
  auto NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
                                          &MRI.getRegClass(RCID));
  if (NewVdata == AMDGPU::NoRegister) {
    // It's possible to encode this such that the low register + enabled
    // components exceeds the register count.
    return MCDisassembler::Success;
  }

  MI.setOpcode(NewOpcode);
  // vaddr will be always appear as a single VGPR. This will look different than
  // how it is usually emitted because the number of register components is not
  // in the instruction encoding.
  MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);

  if (IsAtomic) {
    // Atomic operations have an additional operand (a copy of data)
    MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
  }

  return MCDisassembler::Success;
}

// Human-readable name of a register class, for diagnostics.
const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

// Report a malformed operand encoding: emits ErrMsg to the comment stream
// and returns an invalid MCOperand (addOperand() turns that into SoftFail).
inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

// Wrap a target register id in an MCOperand, mapping it to the proper
// subtarget-specific MC register first.
inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
}

// Build a register operand from a class id and a register index within that
// class; out-of-range indices produce an error operand.
inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

// Build a scalar register operand. Val is the raw encoding in 32-bit
// register units; wider classes are indexed by their first dword, so the
// encoding is shifted down by the class's alignment.
inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we accepting as much as we can, let assembler sort it out
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::TTMP_256RegClassID:
    // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
    // this bundle?
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::TTMP_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    // Misaligned encoding: warn but still decode best-effort.
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}
43930fc5239SDmitry Preobrazhensky 4404bd72361SMatt Arsenault MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const { 4414bd72361SMatt Arsenault return decodeSrcOp(OPW16, Val); 4424bd72361SMatt Arsenault } 4434bd72361SMatt Arsenault 4449be7b0d4SMatt Arsenault MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const { 4459be7b0d4SMatt Arsenault return decodeSrcOp(OPWV216, Val); 4469be7b0d4SMatt Arsenault } 4479be7b0d4SMatt Arsenault 448ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const { 449cb540bc0SMatt Arsenault // Some instructions have operand restrictions beyond what the encoding 450cb540bc0SMatt Arsenault // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra 451cb540bc0SMatt Arsenault // high bit. 452cb540bc0SMatt Arsenault Val &= 255; 453cb540bc0SMatt Arsenault 454ac106addSNikolay Haustov return createRegOperand(AMDGPU::VGPR_32RegClassID, Val); 455ac106addSNikolay Haustov } 456ac106addSNikolay Haustov 457ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const { 458ac106addSNikolay Haustov return createRegOperand(AMDGPU::VReg_64RegClassID, Val); 459ac106addSNikolay Haustov } 460ac106addSNikolay Haustov 461ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const { 462ac106addSNikolay Haustov return createRegOperand(AMDGPU::VReg_96RegClassID, Val); 463ac106addSNikolay Haustov } 464ac106addSNikolay Haustov 465ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const { 466ac106addSNikolay Haustov return createRegOperand(AMDGPU::VReg_128RegClassID, Val); 467ac106addSNikolay Haustov } 468ac106addSNikolay Haustov 469ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const { 470ac106addSNikolay Haustov // table-gen generated disassembler doesn't care about operand types 471ac106addSNikolay Haustov // leaving 
only registry class so SSrc_32 operand turns into SReg_32 472ac106addSNikolay Haustov // and therefore we accept immediates and literals here as well 473212a251cSArtem Tamazov return decodeSrcOp(OPW32, Val); 474ac106addSNikolay Haustov } 475ac106addSNikolay Haustov 476640c44b8SMatt Arsenault MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC( 477640c44b8SMatt Arsenault unsigned Val) const { 478640c44b8SMatt Arsenault // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI 47938e496b1SArtem Tamazov return decodeOperand_SReg_32(Val); 48038e496b1SArtem Tamazov } 48138e496b1SArtem Tamazov 482ca7b0a17SMatt Arsenault MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI( 483ca7b0a17SMatt Arsenault unsigned Val) const { 484ca7b0a17SMatt Arsenault // SReg_32_XM0 is SReg_32 without EXEC_HI 485ca7b0a17SMatt Arsenault return decodeOperand_SReg_32(Val); 486ca7b0a17SMatt Arsenault } 487ca7b0a17SMatt Arsenault 488ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const { 489640c44b8SMatt Arsenault return decodeSrcOp(OPW64, Val); 490640c44b8SMatt Arsenault } 491640c44b8SMatt Arsenault 492640c44b8SMatt Arsenault MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const { 493212a251cSArtem Tamazov return decodeSrcOp(OPW64, Val); 494ac106addSNikolay Haustov } 495ac106addSNikolay Haustov 496ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const { 497212a251cSArtem Tamazov return decodeSrcOp(OPW128, Val); 498ac106addSNikolay Haustov } 499ac106addSNikolay Haustov 500ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const { 50127134953SDmitry Preobrazhensky return decodeDstOp(OPW256, Val); 502ac106addSNikolay Haustov } 503ac106addSNikolay Haustov 504ac106addSNikolay Haustov MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const { 50527134953SDmitry Preobrazhensky return decodeDstOp(OPW512, Val); 
}

/// Decode a 32-bit literal constant that trails the instruction encoding.
/// The literal is read (and cached in HasLiteral/Literal) from the remaining
/// instruction bytes on first use, so several operands can share one literal.
MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are supposed to be unsigned integer
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      // Not enough bytes left in the instruction stream for a 32-bit literal.
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}

/// Map an inline-integer operand encoding to its immediate value.
/// Encodings up to INLINE_INTEGER_C_POSITIVE_MAX represent 0..64; the
/// remaining encodings represent -1..-16.
MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;

  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
      // Cast prevents negative overflow.
}

/// Return the IEEE-754 single-precision bit pattern for an inline FP
/// constant encoding (240..248).
static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

/// Return the IEEE-754 double-precision bit pattern for an inline FP
/// constant encoding (240..248).
static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

/// Return the half-precision (fp16) bit pattern for an inline FP constant
/// encoding (240..248). Values are spelled as raw hex since there is no
/// native 16-bit float type here.
static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;   // 0.5
  case 241:
    return 0xB800;   // -0.5
  case 242:
    return 0x3C00;   // 1.0
  case 243:
    return 0xBC00;   // -1.0
  case 244:
    return 0x4000;   // 2.0
  case 245:
    return 0xC000;   // -2.0
  case 246:
    return 0x4400;   // 4.0
  case 247:
    return 0xC400;   // -4.0
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

/// Decode an inline floating-point constant encoding into an immediate
/// operand whose bit pattern matches the operand width.
MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

/// Select the VGPR register class matching the operand width.
/// 16-bit and packed-16 operands share the 32-bit class.
unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

/// Select the SGPR register class matching the operand width.
unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  case OPW256: return SGPR_256RegClassID;
  case OPW512: return SGPR_512RegClassID;
  }
}

/// Select the trap-temporary (TTMP) register class matching the operand
/// width.
unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  case OPW256: return TTMP_256RegClassID;
  case OPW512: return TTMP_512RegClassID;
  }
}

/// If Val falls in the subtarget's TTMP encoding range, return its index
/// within that range; otherwise return -1. The TTMP range differs between
/// GFX9 and VI.
int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
  using namespace AMDGPU::EncValues;

  unsigned TTmpMin = isGFX9() ? TTMP_GFX9_MIN : TTMP_VI_MIN;
  unsigned TTmpMax = isGFX9() ? TTMP_GFX9_MAX : TTMP_VI_MAX;

  return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
}

/// Decode a 9-bit source-operand encoding: VGPR, SGPR, TTMP, inline
/// integer/FP constant, trailing literal, or a special register.
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 512); // enum9

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  // Anything left is a special register; only 32- and 64-bit widths have
  // special-register encodings.
  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

/// Decode a 7-bit destination-operand encoding for wide (256/512-bit)
/// results: only SGPR and TTMP destinations are valid.
MCOperand AMDGPUDisassembler::decodeDstOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 128);
  assert(Width == OPW256 || Width == OPW512);

  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  llvm_unreachable("unknown dst register");
}

/// Decode a 32-bit special-register operand encoding. Encodings for TBA/TMA
/// halves are asserted absent on GFX9, where those registers were removed.
MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR_LO);
  case 103: return createRegOperand(FLAT_SCR_HI);
  case 104: return createRegOperand(XNACK_MASK_LO);
  case 105: return createRegOperand(XNACK_MASK_HI);
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: assert(!isGFX9()); return createRegOperand(TBA_LO);
  case 109: assert(!isGFX9()); return createRegOperand(TBA_HI);
  case 110: assert(!isGFX9()); return createRegOperand(TMA_LO);
  case 111: assert(!isGFX9()); return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
    // TODO: SRC_POPS_EXITING_WAVE_ID
    // ToDo: no support for vccz register
  case 251: break;
    // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

/// Decode a 64-bit special-register operand encoding (register pairs).
MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR);
  case 104: return createRegOperand(XNACK_MASK);
  case 106: return createRegOperand(VCC);
  case 108: assert(!isGFX9()); return createRegOperand(TBA);
  case 110: assert(!isGFX9()); return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

/// Decode an SDWA source operand. On GFX9 the encoding can name a VGPR,
/// SGPR, TTMP, inline constant, or special register; on VI it is always a
/// VGPR index.
MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
                                            const unsigned Val) const {
  using namespace AMDGPU::SDWA;
  using namespace AMDGPU::EncValues;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    // XXX: static_cast<int> is needed to avoid stupid warning:
    // compare with unsigned is always true
    if (SDWA9EncValues::SRC_VGPR_MIN <= static_cast<int>(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
      return createRegOperand(getVgprClassId(Width),
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    }
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_SGPR_MAX) {
      return createSRegOperand(getSgprClassId(Width),
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    }
    if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_TTMP_MAX) {
      return createSRegOperand(getTtmpClassId(Width),
                               Val - SDWA9EncValues::SRC_TTMP_MIN);
    }

    // Rebase into the regular 9-bit source-encoding space, then fall back
    // to the shared inline-constant / special-register decoding.
    const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;

    if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
      return decodeIntImmed(SVal);

    if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
      return decodeFPImmed(Width, SVal);

    return decodeSpecialReg32(SVal);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    return createRegOperand(getVgprClassId(Width), Val);
  }
  llvm_unreachable("unsupported target");
}

/// Decode a 16-bit-wide SDWA source operand.
MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}

/// Decode a 32-bit-wide SDWA source operand.
MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}

/// Decode the SDWA VOPC destination (GFX9 only): either VCC or, when the
/// VCC-mask bit is set, an SGPR pair / TTMP pair / 64-bit special register.
MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
  using namespace AMDGPU::SDWA;

  assert(STI.getFeatureBits()[AMDGPU::FeatureGFX9] &&
         "SDWAVopcDst should be present only on GFX9");
  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;

    int TTmpIdx = getTTmpIdx(Val);
    if (TTmpIdx >= 0) {
      return createSRegOperand(getTtmpClassId(OPW64), TTmpIdx);
    } else if (Val > AMDGPU::EncValues::SGPR_MAX) {
      return decodeSpecialReg64(Val);
    } else {
      return createSRegOperand(getSgprClassId(OPW64), Val);
    }
  } else {
    return createRegOperand(AMDGPU::VCC);
  }
}

/// True when the subtarget is Volcanic Islands (VI / GFX8).
bool AMDGPUDisassembler::isVI() const {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

/// True when the subtarget is GFX9.
bool AMDGPUDisassembler::isGFX9() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find symbol name for specified label
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  // DisInfo is expected to point at the section's symbol table, stored as
  // (address, name, ELF symbol type) tuples.
  using SymbolInfoTy = std::tuple<uint64_t, StringRef, uint8_t>;
  using SectionSymbolsTy = std::vector<SymbolInfoTy>;

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                                return std::get<0>(Val) == static_cast<uint64_t>(Value)
                                    && std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    // Found a matching untyped symbol: emit it as a symbolic expression
    // operand instead of a raw branch target value.
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

/// Not implemented for AMDGPU; aborts if ever called.
void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

/// Factory callback used by the target registry to construct the symbolizer.
static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

/// Factory callback used by the target registry to construct the
/// disassembler.
static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
}

extern "C" void LLVMInitializeAMDGPUDisassembler() { 922f42454b9SMehdi Amini TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(), 923f42454b9SMehdi Amini createAMDGPUDisassembler); 924f42454b9SMehdi Amini TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(), 925f42454b9SMehdi Amini createAMDGPUSymbolizer); 926e1818af8STom Stellard } 927