//===-- SIMCCodeEmitter.cpp - SI Code Emitter -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The SI code emitter produces machine code that can be executed
/// directly on the GPU device.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

namespace {

class SIMCCodeEmitter : public AMDGPUMCCodeEmitter {
  const MCRegisterInfo &MRI;

  /// Encode an fp or int literal.
  uint32_t getLitEncoding(const MCOperand &MO, const MCOperandInfo &OpInfo,
                          const MCSubtargetInfo &STI) const;

public:
  SIMCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
      : AMDGPUMCCodeEmitter(mcii), MRI(*ctx.getRegisterInfo()) {}
  SIMCCodeEmitter(const SIMCCodeEmitter &) = delete;
  SIMCCodeEmitter &operator=(const SIMCCodeEmitter &) = delete;

  /// Encode the instruction and write it to the OS.
  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  /// \returns the encoding for an MCOperand.
  void getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  /// Use a fixup to encode the simm16 field for SOPP branch
  /// instructions.
  void getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  void getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const override;

  void getSDWASrcEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const override;

  void getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const override;

  void getAVOperandEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const override;

private:
  uint64_t getImplicitOpSelHiEncoding(int Opcode) const;
  void getMachineOpValueCommon(const MCInst &MI, const MCOperand &MO,
                               unsigned OpNo, APInt &Op,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;
};

} // end anonymous namespace

MCCodeEmitter *llvm::createSIMCCodeEmitter(const MCInstrInfo &MCII,
                                           MCContext &Ctx) {
  return new SIMCCodeEmitter(MCII, Ctx);
}
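// Illustrative summary (not in the original file), derived from the helpers
// that follow: SI-family source operands use the field values 128..208 for
// inline integer constants (128 + N for N in [0, 64], 192 + |N| for N in
// [-16, -1]), 240..247 for the inline float constants +/-0.5, +/-1.0, +/-2.0
// and +/-4.0, 248 for 1/(2*pi) when FeatureInv2PiInlineImm is available, and
// 255 to select a literal constant emitted after the instruction word.
// For example:
//   getIntInlineImmEncoding(int32_t(7))   -> 135  (128 + 7)
//   getIntInlineImmEncoding(int32_t(-3))  -> 195  (192 + 3)
//   getIntInlineImmEncoding(int32_t(100)) -> 0    (not inline; use a literal)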
// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}

static uint32_t getLit16IntEncoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  return IntImm == 0 ? 255 : IntImm;
}

static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == 0x3800) // 0.5
    return 240;

  if (Val == 0xB800) // -0.5
    return 241;

  if (Val == 0x3C00) // 1.0
    return 242;

  if (Val == 0xBC00) // -1.0
    return 243;

  if (Val == 0x4000) // 2.0
    return 244;

  if (Val == 0xC000) // -2.0
    return 245;

  if (Val == 0x4400) // 4.0
    return 246;

  if (Val == 0xC400) // -4.0
    return 247;

  if (Val == 0x3118 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == FloatToBits(0.5f))
    return 240;

  if (Val == FloatToBits(-0.5f))
    return 241;

  if (Val == FloatToBits(1.0f))
    return 242;

  if (Val == FloatToBits(-1.0f))
    return 243;

  if (Val == FloatToBits(2.0f))
    return 244;

  if (Val == FloatToBits(-2.0f))
    return 245;

  if (Val == FloatToBits(4.0f))
    return 246;

  if (Val == FloatToBits(-4.0f))
    return 247;

  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == DoubleToBits(0.5))
    return 240;

  if (Val == DoubleToBits(-0.5))
    return 241;

  if (Val == DoubleToBits(1.0))
    return 242;

  if (Val == DoubleToBits(-1.0))
    return 243;

  if (Val == DoubleToBits(2.0))
    return 244;

  if (Val == DoubleToBits(-2.0))
    return 245;

  if (Val == DoubleToBits(4.0))
    return 246;

  if (Val == DoubleToBits(-4.0))
    return 247;

  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}
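// A few concrete values, following directly from the helpers above
// (illustrative, not part of the original file):
//   getLit32Encoding(FloatToBits(1.0f), STI) -> 242
//   getLit32Encoding(0x00000040, STI)        -> 192  (inline integer 64)
//   getLit32Encoding(0x12345678, STI)        -> 255  (must be a literal)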
uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
                                         const MCOperandInfo &OpInfo,
                                         const MCSubtargetInfo &STI) const {
  int64_t Imm;
  if (MO.isExpr()) {
    const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
    if (!C)
      return 255;

    Imm = C->getValue();
  } else {
    assert(!MO.isDFPImm());

    if (!MO.isImm())
      return ~0;

    Imm = MO.getImm();
  }

  switch (OpInfo.OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_IMM_V2INT32:
  case AMDGPU::OPERAND_REG_IMM_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
    return getLit32Encoding(static_cast<uint32_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
    return getLit64Encoding(static_cast<uint64_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
    return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
    // FIXME: Is this correct? What do inline immediates do on SI for f16 src
    // which does not have f16 support?
    return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16: {
    if (!isUInt<16>(Imm) && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal])
      return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
    if (OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_V2FP16)
      return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
    LLVM_FALLTHROUGH;
  }
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
    return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: {
    uint16_t Lo16 = static_cast<uint16_t>(Imm);
    uint32_t Encoding = getLit16Encoding(Lo16, STI);
    return Encoding;
  }
  case AMDGPU::OPERAND_KIMM32:
  case AMDGPU::OPERAND_KIMM16:
    return MO.getImm();
  default:
    llvm_unreachable("invalid operand size");
  }
}

uint64_t SIMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
  using namespace AMDGPU::VOP3PEncoding;
  using namespace AMDGPU::OpName;

  if (AMDGPU::getNamedOperandIdx(Opcode, op_sel_hi) != -1) {
    if (AMDGPU::getNamedOperandIdx(Opcode, src2) != -1)
      return 0;
    if (AMDGPU::getNamedOperandIdx(Opcode, src1) != -1)
      return OP_SEL_HI_2;
    if (AMDGPU::getNamedOperandIdx(Opcode, src0) != -1)
      return OP_SEL_HI_1 | OP_SEL_HI_2;
  }
  return OP_SEL_HI_0 | OP_SEL_HI_1 | OP_SEL_HI_2;
}
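// Illustrative summary of the defaulting rule above (not in the original
// file): unused op_sel_hi bits default to 1, so an opcode with src0, src1 and
// src2 contributes 0 here (every bit is owned by an explicit operand), one
// with src0 and src1 contributes OP_SEL_HI_2, one with only src0 contributes
// OP_SEL_HI_1 | OP_SEL_HI_2, and an opcode with no op_sel_hi operand gets all
// three bits set.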
void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  int Opcode = MI.getOpcode();
  APInt Encoding, Scratch;
  getBinaryCodeForInstr(MI, Fixups, Encoding, Scratch, STI);
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  unsigned bytes = Desc.getSize();

  // Set unused op_sel_hi bits to 1 for VOP3P and MAI instructions.
  // Note that accvgpr_read/write are MAI, have src0, but do not use op_sel.
  if ((Desc.TSFlags & SIInstrFlags::VOP3P) ||
      Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
      Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) {
    Encoding |= getImplicitOpSelHiEncoding(Opcode);
  }

  for (unsigned i = 0; i < bytes; i++) {
    OS.write((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));
  }

  // NSA encoding.
  if (AMDGPU::isGFX10Plus(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
    int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vaddr0);
    int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::srsrc);
    assert(vaddr0 >= 0 && srsrc > vaddr0);
    unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
    unsigned NumPadding = (-NumExtraAddrs) & 3;

    for (unsigned i = 0; i < NumExtraAddrs; ++i) {
      getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
                        STI);
      OS.write((uint8_t)Encoding.getLimitedValue());
    }
    for (unsigned i = 0; i < NumPadding; ++i)
      OS.write(0);
  }

  if ((bytes > 8 && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]) ||
      (bytes > 4 && !STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]))
    return;

  // Do not print literals from SISrc operands for instructions with mandatory
  // literals.
  int ImmLitIdx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::imm);
  if (ImmLitIdx != -1)
    return;

  // Check for additional literals.
  for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {

    // Check if this operand should be encoded as [SV]Src.
    if (!AMDGPU::isSISrcOperand(Desc, i))
      continue;

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    if (getLitEncoding(Op, Desc.OpInfo[i], STI) != 255)
      continue;

    // Yes! Encode it.
    int64_t Imm = 0;

    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      // Non-constant exprs are left as zero and replaced with a fixup value.
      if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
        Imm = C->getValue();
    } else
      llvm_unreachable("Must be immediate or expr");

    for (unsigned j = 0; j < 4; j++) {
      OS.write((uint8_t)((Imm >> (8 * j)) & 0xff));
    }

    // Only one literal value allowed.
    break;
  }
}

void SIMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                        APInt &Op,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isExpr()) {
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
    Op = APInt::getNullValue(96);
  } else {
    getMachineOpValue(MI, MO, Op, Fixups, STI);
  }
}
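// Illustrative note (not in the original file): a branch whose target is
// still symbolic, e.g.
//   s_cbranch_scc0 .LBB0_1
// reaches getSOPPBrEncoding() with an MCExpr operand. The simm16 field is
// emitted as zero here and a fixup_si_sopp_br fixup is recorded against the
// expression, to be resolved once the label's offset is known.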
void SIMCCodeEmitter::getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo,
                                            APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  auto Offset = MI.getOperand(OpNo).getImm();
  // VI only supports 20-bit unsigned offsets.
  assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
  Op = Offset;
}

void SIMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                                         APInt &Op,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isReg()) {
    unsigned Reg = MO.getReg();
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
    if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI)) {
      RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
    }
    Op = RegEnc;
    return;
  } else {
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
    if (Enc != ~0U && Enc != 255) {
      Op = Enc | SDWA9EncValues::SRC_SGPR_MASK;
      return;
    }
  }

  llvm_unreachable("Unsupported operand kind");
}

void SIMCCodeEmitter::getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo,
                                             APInt &Op,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  unsigned Reg = MO.getReg();
  if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
  }
  Op = RegEnc;
}
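// Illustrative note (not in the original file): getSDWAVopcDstEncoding()
// leaves the field as zero when the destination is VCC (the hardware
// default); for any other SGPR it sets VOPC_DST_VCC_MASK and ORs in the
// register's masked encoding value.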
void SIMCCodeEmitter::getAVOperandEncoding(const MCInst &MI, unsigned OpNo,
                                           APInt &Op,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  unsigned Reg = MI.getOperand(OpNo).getReg();
  uint64_t Enc = MRI.getEncodingValue(Reg);

  // VGPR and AGPR have the same encoding, but SrcA and SrcB operands of mfma
  // instructions use acc[0:1] modifier bits to distinguish. These bits are
  // encoded as a virtual 9th bit of the register for these operands.
  if (MRI.getRegClass(AMDGPU::AGPR_32RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_64RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_96RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_128RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_160RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_192RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_224RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_256RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_512RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AGPR_LO16RegClassID).contains(Reg))
    Enc |= 512;

  Op = Enc;
}

static bool needsPCRel(const MCExpr *Expr) {
  switch (Expr->getKind()) {
  case MCExpr::SymbolRef: {
    auto *SE = cast<MCSymbolRefExpr>(Expr);
    MCSymbolRefExpr::VariantKind Kind = SE->getKind();
    return Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_LO &&
           Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_HI;
  }
  case MCExpr::Binary: {
    auto *BE = cast<MCBinaryExpr>(Expr);
    if (BE->getOpcode() == MCBinaryExpr::Sub)
      return false;
    return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
  }
  case MCExpr::Unary:
    return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
  case MCExpr::Target:
  case MCExpr::Constant:
    return false;
  }
  llvm_unreachable("invalid kind");
}

void SIMCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                        const MCOperand &MO, APInt &Op,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  if (MO.isReg()) {
    Op = MRI.getEncodingValue(MO.getReg());
    return;
  }
  unsigned OpNo = &MO - MI.begin();
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}

void SIMCCodeEmitter::getMachineOpValueCommon(
    const MCInst &MI, const MCOperand &MO, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {

  if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
    // FIXME: Whether this expression is PC-relative should not depend on what
    // the expression looks like. Given that this is just a general expression,
    // it should probably be FK_Data_4 and whatever is producing
    //
    //    s_add_u32 s2, s2, (extern_const_addrspace+16)
    //
    // And expecting a PCRel should instead produce
    //
    // .Ltmp1:
    //    s_add_u32 s2, s2, (extern_const_addrspace+16)-.Ltmp1
    MCFixupKind Kind;
    if (needsPCRel(MO.getExpr()))
      Kind = FK_PCRel_4;
    else
      Kind = FK_Data_4;

    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Offset = Desc.getSize();
    assert(Offset == 4 || Offset == 8);

    Fixups.push_back(MCFixup::create(Offset, MO.getExpr(), Kind, MI.getLoc()));
  }

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
    uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
    if (Enc != ~0U) {
      Op = Enc;
      return;
    }
  } else if (MO.isImm()) {
    Op = MO.getImm();
    return;
  }

  llvm_unreachable("Encoding of this operand type is not supported yet.");
}

#include "AMDGPUGenMCCodeEmitter.inc"