//===-- SIMCCodeEmitter.cpp - SI Code Emitter -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The SI code emitter produces machine code that can be executed
/// directly on the GPU device.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

namespace {

class SIMCCodeEmitter : public AMDGPUMCCodeEmitter {
  const MCRegisterInfo &MRI;

  /// Encode an fp or int literal.
  uint32_t getLitEncoding(const MCOperand &MO, const MCOperandInfo &OpInfo,
                          const MCSubtargetInfo &STI) const;

public:
  SIMCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
      : AMDGPUMCCodeEmitter(mcii), MRI(*ctx.getRegisterInfo()) {}
  SIMCCodeEmitter(const SIMCCodeEmitter &) = delete;
  SIMCCodeEmitter &operator=(const SIMCCodeEmitter &) = delete;

  /// Encode the instruction and write it to the OS.
  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  /// \returns the encoding for an MCOperand.
  void getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  /// Use a fixup to encode the simm16 field for SOPP branch
  /// instructions.
  void getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  void getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const override;

  void getSDWASrcEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const override;

  void getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const override;

  void getAVOperandEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const override;

private:
  uint64_t getImplicitOpSelHiEncoding(int Opcode) const;
  void getMachineOpValueCommon(const MCInst &MI, const MCOperand &MO,
                               unsigned OpNo, APInt &Op,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;
};

} // end anonymous namespace

MCCodeEmitter *llvm::createSIMCCodeEmitter(const MCInstrInfo &MCII,
                                           MCContext &Ctx) {
  return new SIMCCodeEmitter(MCII, Ctx);
}
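// A minimal usage sketch (illustrative only, not part of this file): the
// factory hands ownership of the emitter to the caller, which typically
// wraps it before handing it to a streamer, e.g.
//
//   std::unique_ptr<MCCodeEmitter> CE(createSIMCCodeEmitter(MCII, Ctx));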
// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}

static uint32_t getLit16IntEncoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  return IntImm == 0 ? 255 : IntImm;
}

static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == 0x3800) // 0.5
    return 240;

  if (Val == 0xB800) // -0.5
    return 241;

  if (Val == 0x3C00) // 1.0
    return 242;

  if (Val == 0xBC00) // -1.0
    return 243;

  if (Val == 0x4000) // 2.0
    return 244;

  if (Val == 0xC000) // -2.0
    return 245;

  if (Val == 0x4400) // 4.0
    return 246;

  if (Val == 0xC400) // -4.0
    return 247;

  if (Val == 0x3118 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == FloatToBits(0.5f))
    return 240;

  if (Val == FloatToBits(-0.5f))
    return 241;

  if (Val == FloatToBits(1.0f))
    return 242;

  if (Val == FloatToBits(-1.0f))
    return 243;

  if (Val == FloatToBits(2.0f))
    return 244;

  if (Val == FloatToBits(-2.0f))
    return 245;

  if (Val == FloatToBits(4.0f))
    return 246;

  if (Val == FloatToBits(-4.0f))
    return 247;

  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == DoubleToBits(0.5))
    return 240;

  if (Val == DoubleToBits(-0.5))
    return 241;

  if (Val == DoubleToBits(1.0))
    return 242;

  if (Val == DoubleToBits(-1.0))
    return 243;

  if (Val == DoubleToBits(2.0))
    return 244;

  if (Val == DoubleToBits(-2.0))
    return 245;

  if (Val == DoubleToBits(4.0))
    return 246;

  if (Val == DoubleToBits(-4.0))
    return 247;

  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}
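// Worked examples for the helpers above (illustrative only; each value
// follows directly from the rules they implement):
//
//   getIntInlineImmEncoding(0)    -> 128   // inline 0
//   getIntInlineImmEncoding(64)   -> 192   // inline 64
//   getIntInlineImmEncoding(-16)  -> 208   // inline -16 (192 + 16)
//   getIntInlineImmEncoding(100)  -> 0     // not inlinable
//   getLit16Encoding(0x3800, STI)              -> 240  // inline f16 0.5
//   getLit32Encoding(FloatToBits(1.0f), STI)   -> 242  // inline f32 1.0
//   getLit32Encoding(0x12345678, STI)          -> 255  // needs a literal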
uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
                                         const MCOperandInfo &OpInfo,
                                         const MCSubtargetInfo &STI) const {
  int64_t Imm;
  if (MO.isExpr()) {
    const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
    if (!C)
      return 255;

    Imm = C->getValue();
  } else {
    assert(!MO.isDFPImm());

    if (!MO.isImm())
      return ~0;

    Imm = MO.getImm();
  }

  switch (OpInfo.OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_IMM_V2INT32:
  case AMDGPU::OPERAND_REG_IMM_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
    return getLit32Encoding(static_cast<uint32_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
    return getLit64Encoding(static_cast<uint64_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
    return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
    // FIXME: Is this correct? What do inline immediates do on SI for f16 src
    // which does not have f16 support?
    return getLit16Encoding(static_cast<uint16_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16: {
    if (!isUInt<16>(Imm) && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal])
      return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
    if (OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_V2FP16)
      return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
    LLVM_FALLTHROUGH;
  }
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
    return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: {
    uint16_t Lo16 = static_cast<uint16_t>(Imm);
    uint32_t Encoding = getLit16Encoding(Lo16, STI);
    return Encoding;
  }
  case AMDGPU::OPERAND_KIMM32:
  case AMDGPU::OPERAND_KIMM16:
    return MO.getImm();
  default:
    llvm_unreachable("invalid operand size");
  }
}

uint64_t SIMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
  using namespace AMDGPU::VOP3PEncoding;
  using namespace AMDGPU::OpName;

  if (AMDGPU::getNamedOperandIdx(Opcode, op_sel_hi) != -1) {
    if (AMDGPU::getNamedOperandIdx(Opcode, src2) != -1)
      return 0;
    if (AMDGPU::getNamedOperandIdx(Opcode, src1) != -1)
      return OP_SEL_HI_2;
    if (AMDGPU::getNamedOperandIdx(Opcode, src0) != -1)
      return OP_SEL_HI_1 | OP_SEL_HI_2;
  }
  return OP_SEL_HI_0 | OP_SEL_HI_1 | OP_SEL_HI_2;
}
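// Illustration of the defaulting above: with src0, src1 and src2 all
// present, every op_sel_hi bit is covered by an explicit operand, so nothing
// is added; with only src0 and src1, the bit for the missing src2
// (OP_SEL_HI_2) is forced to 1; with only src0, OP_SEL_HI_1 | OP_SEL_HI_2
// are forced; and an opcode with no op_sel_hi operand at all defaults every
// bit to 1.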
void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  verifyInstructionPredicates(MI,
                              computeAvailableFeatures(STI.getFeatureBits()));

  int Opcode = MI.getOpcode();
  APInt Encoding, Scratch;
  getBinaryCodeForInstr(MI, Fixups, Encoding, Scratch, STI);
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  unsigned bytes = Desc.getSize();

  // Set unused op_sel_hi bits to 1 for VOP3P and MAI instructions.
  // Note that accvgpr_read/write are MAI, have src0, but do not use op_sel.
  if ((Desc.TSFlags & SIInstrFlags::VOP3P) ||
      Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
      Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) {
    Encoding |= getImplicitOpSelHiEncoding(Opcode);
  }

  for (unsigned i = 0; i < bytes; i++) {
    OS.write((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));
  }

  // NSA encoding.
  if (AMDGPU::isGFX10Plus(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
    int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vaddr0);
    int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::srsrc);
    assert(vaddr0 >= 0 && srsrc > vaddr0);
    unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
    unsigned NumPadding = (-NumExtraAddrs) & 3;

    for (unsigned i = 0; i < NumExtraAddrs; ++i) {
      getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
                        STI);
      OS.write((uint8_t)Encoding.getLimitedValue());
    }
    for (unsigned i = 0; i < NumPadding; ++i)
      OS.write(0);
  }

  if ((bytes > 8 && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]) ||
      (bytes > 4 && !STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]))
    return;

  // Do not emit literals from SISrc operands for instructions with a
  // mandatory literal.
  int ImmLitIdx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::imm);
  if (ImmLitIdx != -1)
    return;

  // Check for additional literals.
  for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {
    // Check if this operand should be encoded as [SV]Src.
    if (!AMDGPU::isSISrcOperand(Desc, i))
      continue;

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    if (getLitEncoding(Op, Desc.OpInfo[i], STI) != 255)
      continue;

    // Yes! Encode it.
    int64_t Imm = 0;

    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      // Non-constant expressions are emitted as zero here; a fixup will
      // patch in the resolved value.
      if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
        Imm = C->getValue();
    } else
      llvm_unreachable("Must be immediate or expr");

    for (unsigned j = 0; j < 4; j++) {
      OS.write((uint8_t)((Imm >> (8 * j)) & 0xff));
    }

    // Only one literal value allowed.
    break;
  }
}

void SIMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                        APInt &Op,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isExpr()) {
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
    Op = APInt::getNullValue(96);
  } else {
    getMachineOpValue(MI, MO, Op, Fixups, STI);
  }
}
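// Illustrative example: for a SOPP branch such as
//
//   s_cbranch_scc0 .LBB0_1
//
// the simm16 field is emitted as zero above, and the fixup_si_sopp_br record
// lets the assembler backend patch in the real branch offset once the label
// is resolved.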
void SIMCCodeEmitter::getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo,
                                            APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  auto Offset = MI.getOperand(OpNo).getImm();
  // VI only supports 20-bit unsigned offsets.
  assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
  Op = Offset;
}

void SIMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                                         APInt &Op,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isReg()) {
    unsigned Reg = MO.getReg();
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
    if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI)) {
      RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
    }
    Op = RegEnc;
    return;
  } else {
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
    if (Enc != ~0U && Enc != 255) {
      Op = Enc | SDWA9EncValues::SRC_SGPR_MASK;
      return;
    }
  }

  llvm_unreachable("Unsupported operand kind");
}

void SIMCCodeEmitter::getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo,
                                             APInt &Op,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  unsigned Reg = MO.getReg();
  if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
  }
  Op = RegEnc;
}
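// Sketch of the SDWA9 source encoding produced above: a VGPR keeps its raw
// encoding masked to SRC_VGPR_MASK (e.g. v17 -> 17), while SGPRs and inline
// constants additionally set SRC_SGPR_MASK; the two stay distinguishable
// because their masked values occupy disjoint ranges.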
void SIMCCodeEmitter::getAVOperandEncoding(const MCInst &MI, unsigned OpNo,
                                           APInt &Op,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  unsigned Reg = MI.getOperand(OpNo).getReg();
  uint64_t Enc = MRI.getEncodingValue(Reg);

  // VGPR and AGPR have the same encoding, but SrcA and SrcB operands of mfma
  // instructions use acc[0:1] modifier bits to distinguish. These bits are
  // encoded as a virtual 9th bit of the register for these operands.
  if (MRI.getRegClass(AMDGPU::AGPR_32RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_64RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_96RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_128RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_160RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_192RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_224RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_256RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_512RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AGPR_LO16RegClassID).contains(Reg))
    Enc |= 512;

  Op = Enc;
}

static bool needsPCRel(const MCExpr *Expr) {
  switch (Expr->getKind()) {
  case MCExpr::SymbolRef: {
    auto *SE = cast<MCSymbolRefExpr>(Expr);
    MCSymbolRefExpr::VariantKind Kind = SE->getKind();
    return Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_LO &&
           Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_HI;
  }
  case MCExpr::Binary: {
    auto *BE = cast<MCBinaryExpr>(Expr);
    if (BE->getOpcode() == MCBinaryExpr::Sub)
      return false;
    return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
  }
  case MCExpr::Unary:
    return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
  case MCExpr::Target:
  case MCExpr::Constant:
    return false;
  }
  llvm_unreachable("invalid kind");
}

void SIMCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                                        APInt &Op,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  if (MO.isReg()) {
    Op = MRI.getEncodingValue(MO.getReg());
    return;
  }
  unsigned OpNo = &MO - MI.begin();
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}

void SIMCCodeEmitter::getMachineOpValueCommon(
    const MCInst &MI, const MCOperand &MO, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());

  if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
    // FIXME: Whether this expression is PCRel or not should not depend on
    // what the expression looks like. Given that this is just a general
    // expression, it should probably be FK_Data_4 and whatever is producing
    //
    //    s_add_u32 s2, s2, (extern_const_addrspace+16)
    //
    // and expecting a PCRel should instead produce
    //
    // .Ltmp1:
    //    s_add_u32 s2, s2, (extern_const_addrspace+16)-.Ltmp1
    MCFixupKind Kind;
    if (needsPCRel(MO.getExpr()))
      Kind = FK_PCRel_4;
    else
      Kind = FK_Data_4;

    uint32_t Offset = Desc.getSize();
    assert(Offset == 4 || Offset == 8);

    Fixups.push_back(MCFixup::create(Offset, MO.getExpr(), Kind, MI.getLoc()));
  }

  if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
    uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
    if (Enc != ~0U) {
      Op = Enc;
      return;
    }
  } else if (MO.isImm()) {
    Op = MO.getImm();
    return;
  }

  llvm_unreachable("Encoding of this operand type is not supported yet.");
}

#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "AMDGPUGenMCCodeEmitter.inc"