//===-- SIMCCodeEmitter.cpp - SI Code Emitter -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The SI code emitter produces machine code that can be executed
/// directly on the GPU device.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

namespace {

class SIMCCodeEmitter : public AMDGPUMCCodeEmitter {
  const MCRegisterInfo &MRI;

  /// Encode an fp or int literal.
  uint32_t getLitEncoding(const MCOperand &MO, const MCOperandInfo &OpInfo,
                          const MCSubtargetInfo &STI) const;

public:
  SIMCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
      : AMDGPUMCCodeEmitter(mcii), MRI(*ctx.getRegisterInfo()) {}
  SIMCCodeEmitter(const SIMCCodeEmitter &) = delete;
  SIMCCodeEmitter &operator=(const SIMCCodeEmitter &) = delete;

  /// Encode the instruction and write it to the OS.
  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  /// \returns the encoding for an MCOperand.
  uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const override;

  /// Use a fixup to encode the simm16 field for SOPP branch
  /// instructions.
  unsigned getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const override;

  unsigned getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const override;

  unsigned getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const override;

  unsigned getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const override;

  unsigned getAVOperandEncoding(const MCInst &MI, unsigned OpNo,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const override;

private:
  uint64_t getImplicitOpSelHiEncoding(int Opcode) const;
};

} // end anonymous namespace

MCCodeEmitter *llvm::createSIMCCodeEmitter(const MCInstrInfo &MCII,
                                           MCContext &Ctx) {
  return new SIMCCodeEmitter(MCII, Ctx);
}

// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
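// For example: 0 encodes as 128, 64 as 192, -1 as 193, and -16 as 208, while
// 65 has no inline encoding and yields 0.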
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}

static uint32_t getLit16IntEncoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  return IntImm == 0 ? 255 : IntImm;
}

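// Maps an f16 bit pattern to its source-operand encoding: an inline integer or
// float constant where possible, otherwise 255, meaning a 32-bit literal dword
// follows the instruction. E.g. 0x3C00 (1.0) encodes as 242 and 0x0001 as 129,
// while a value with no inline encoding such as 0x3555 yields 255.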
static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == 0x3800) // 0.5
    return 240;

  if (Val == 0xB800) // -0.5
    return 241;

  if (Val == 0x3C00) // 1.0
    return 242;

  if (Val == 0xBC00) // -1.0
    return 243;

  if (Val == 0x4000) // 2.0
    return 244;

  if (Val == 0xC000) // -2.0
    return 245;

  if (Val == 0x4400) // 4.0
    return 246;

  if (Val == 0xC400) // -4.0
    return 247;

  if (Val == 0x3118 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

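// Same mapping for f32 bit patterns. E.g. FloatToBits(1.0f) (0x3F800000)
// encodes as 242 and 0xFFFFFFFF (-1) as 193, while an arbitrary value such as
// 0x12345678 yields 255 (a literal follows).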
static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == FloatToBits(0.5f))
    return 240;

  if (Val == FloatToBits(-0.5f))
    return 241;

  if (Val == FloatToBits(1.0f))
    return 242;

  if (Val == FloatToBits(-1.0f))
    return 243;

  if (Val == FloatToBits(2.0f))
    return 244;

  if (Val == FloatToBits(-2.0f))
    return 245;

  if (Val == FloatToBits(4.0f))
    return 246;

  if (Val == FloatToBits(-4.0f))
    return 247;

  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == DoubleToBits(0.5))
    return 240;

  if (Val == DoubleToBits(-0.5))
    return 241;

  if (Val == DoubleToBits(1.0))
    return 242;

  if (Val == DoubleToBits(-1.0))
    return 243;

  if (Val == DoubleToBits(2.0))
    return 244;

  if (Val == DoubleToBits(-2.0))
    return 245;

  if (Val == DoubleToBits(4.0))
    return 246;

  if (Val == DoubleToBits(-4.0))
    return 247;

  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

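// Returns the source-operand encoding for MO based on the operand type: an
// inline-constant code, 255 when a 32-bit literal must follow the instruction,
// or ~0 for operand kinds that cannot be encoded. E.g. for a 32-bit src
// operand, 1 encodes as 129, the bit pattern of 1.0f as 242, and 0x12345678
// as 255.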
uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
                                         const MCOperandInfo &OpInfo,
                                         const MCSubtargetInfo &STI) const {
  int64_t Imm;
  if (MO.isExpr()) {
    const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
    if (!C)
      return 255;

    Imm = C->getValue();
  } else {
    assert(!MO.isDFPImm());

    if (!MO.isImm())
      return ~0;

    Imm = MO.getImm();
  }

  switch (OpInfo.OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_IMM_V2INT32:
  case AMDGPU::OPERAND_REG_IMM_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
    return getLit32Encoding(static_cast<uint32_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
    return getLit64Encoding(static_cast<uint64_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
    return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
    // FIXME: Is this correct? What do inline immediates do on SI for an f16
    // src operand when the subtarget does not have f16 support?
    return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16: {
    if (!isUInt<16>(Imm) && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal])
      return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
    if (OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_V2FP16)
      return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
    LLVM_FALLTHROUGH;
  }
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
    return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: {
    uint16_t Lo16 = static_cast<uint16_t>(Imm);
    return getLit16Encoding(Lo16, STI);
  }
  case AMDGPU::OPERAND_KIMM32:
  case AMDGPU::OPERAND_KIMM16:
    return MO.getImm();
  default:
    llvm_unreachable("invalid operand size");
  }
}

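// Computes the op_sel_hi bits for source operands the instruction does not
// have; the hardware expects the unused bits to read as 1. E.g. a two-source
// VOP3P instruction such as v_pk_add_f16 has no src2, so this returns
// OP_SEL_HI_2.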
uint64_t SIMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
  using namespace AMDGPU::VOP3PEncoding;
  using namespace AMDGPU::OpName;

  if (AMDGPU::getNamedOperandIdx(Opcode, op_sel_hi) != -1) {
    if (AMDGPU::getNamedOperandIdx(Opcode, src2) != -1)
      return 0;
    if (AMDGPU::getNamedOperandIdx(Opcode, src1) != -1)
      return OP_SEL_HI_2;
    if (AMDGPU::getNamedOperandIdx(Opcode, src0) != -1)
      return OP_SEL_HI_1 | OP_SEL_HI_2;
  }
  return OP_SEL_HI_0 | OP_SEL_HI_1 | OP_SEL_HI_2;
}

void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  verifyInstructionPredicates(MI,
                              computeAvailableFeatures(STI.getFeatureBits()));

  int Opcode = MI.getOpcode();
  uint64_t Encoding = getBinaryCodeForInstr(MI, Fixups, STI);
  const MCInstrDesc &Desc = MCII.get(Opcode);
  unsigned bytes = Desc.getSize();

  // Set unused op_sel_hi bits to 1 for VOP3P and MAI instructions.
  // Note that accvgpr_read/write are MAI, have src0, but do not use op_sel.
  if ((Desc.TSFlags & SIInstrFlags::VOP3P) ||
      Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
      Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) {
    Encoding |= getImplicitOpSelHiEncoding(Opcode);
  }

  for (unsigned i = 0; i < bytes; i++) {
    OS.write((uint8_t) ((Encoding >> (8 * i)) & 0xff));
  }

  // NSA encoding.
  if (AMDGPU::isGFX10Plus(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
    int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vaddr0);
    int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::srsrc);
    assert(vaddr0 >= 0 && srsrc > vaddr0);
    unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
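    // Round the extra address bytes up to a dword boundary: e.g. one extra
    // address needs three bytes of padding, four extra need none.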
    unsigned NumPadding = (-NumExtraAddrs) & 3;

    for (unsigned i = 0; i < NumExtraAddrs; ++i)
      OS.write((uint8_t)getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i),
                                          Fixups, STI));
    for (unsigned i = 0; i < NumPadding; ++i)
      OS.write(0);
  }

  if ((bytes > 8 && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]) ||
      (bytes > 4 && !STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]))
    return;

  // Do not emit literals from SISrc operands for instructions with a mandatory
  // literal operand; that literal is already part of the main encoding.
  int ImmLitIdx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::imm);
  if (ImmLitIdx != -1)
    return;

  // Check for additional literals
  for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {

    // Check if this operand should be encoded as [SV]Src
    if (!AMDGPU::isSISrcOperand(Desc, i))
      continue;

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    if (getLitEncoding(Op, Desc.OpInfo[i], STI) != 255)
      continue;

    // Yes! Encode it
    int64_t Imm = 0;

    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      // Non-constant exprs are left as 0 here; they are resolved later by the
      // fixup created in getMachineOpValue.
      if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
        Imm = C->getValue();
    } else
      llvm_unreachable("Must be immediate or expr");

    for (unsigned j = 0; j < 4; j++) {
      OS.write((uint8_t) ((Imm >> (8 * j)) & 0xff));
    }

    // Only one literal value allowed
    break;
  }
}

unsigned SIMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isExpr()) {
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
    return 0;
  }

  return getMachineOpValue(MI, MO, Fixups, STI);
}

unsigned SIMCCodeEmitter::getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo,
                                                SmallVectorImpl<MCFixup> &Fixups,
                                                const MCSubtargetInfo &STI) const {
  auto Offset = MI.getOperand(OpNo).getImm();
  // VI only supports 20-bit unsigned offsets.
  assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
  return Offset;
}

unsigned
SIMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isReg()) {
    unsigned Reg = MO.getReg();
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
    if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI)) {
      RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
    }
    return RegEnc;
  } else {
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
    if (Enc != ~0U && Enc != 255) {
      return Enc | SDWA9EncValues::SRC_SGPR_MASK;
    }
  }

  llvm_unreachable("Unsupported operand kind");
  return 0;
}

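// Encodes an SDWA VOPC destination: VCC (or VCC_LO in wave32 mode) encodes as
// 0, while any other SGPR destination gets VOPC_DST_VCC_MASK set on top of its
// register encoding.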
unsigned
SIMCCodeEmitter::getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  unsigned Reg = MO.getReg();
  if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
  }
  return RegEnc;
}

unsigned
SIMCCodeEmitter::getAVOperandEncoding(const MCInst &MI, unsigned OpNo,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  unsigned Reg = MI.getOperand(OpNo).getReg();
  uint64_t Enc = MRI.getEncodingValue(Reg);

  // VGPR and AGPR have the same encoding, but SrcA and SrcB operands of mfma
  // instructions use acc[0:1] modifier bits to distinguish. These bits are
  // encoded as a virtual 9th bit of the register for these operands.
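  // E.g. v0 and a0 share the raw encoding 0; for an AGPR the bit added below
  // yields 512.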
  if (MRI.getRegClass(AMDGPU::AGPR_32RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_64RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_96RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_128RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_160RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_192RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_224RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_256RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_512RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AGPR_LO16RegClassID).contains(Reg))
    Enc |= 512;

  return Enc;
}

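// Returns true if the expression must be relocated PC-relative: a plain symbol
// reference is, the @abs32@lo / @abs32@hi variants are not, and a difference
// of two symbols resolves to an absolute value.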
static bool needsPCRel(const MCExpr *Expr) {
  switch (Expr->getKind()) {
  case MCExpr::SymbolRef: {
    auto *SE = cast<MCSymbolRefExpr>(Expr);
    MCSymbolRefExpr::VariantKind Kind = SE->getKind();
    return Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_LO &&
           Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_HI;
  }
  case MCExpr::Binary: {
    auto *BE = cast<MCBinaryExpr>(Expr);
    if (BE->getOpcode() == MCBinaryExpr::Sub)
      return false;
    return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
  }
  case MCExpr::Unary:
    return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
  case MCExpr::Target:
  case MCExpr::Constant:
    return false;
  }
  llvm_unreachable("invalid kind");
}

uint64_t SIMCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                            const MCOperand &MO,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  if (MO.isReg())
    return MRI.getEncodingValue(MO.getReg());

  if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
    // FIXME: Whether this expression is PCRel should not depend on what the
    // expression looks like. Given that this is just a general expression, it
    // should probably be FK_Data_4 and whatever is producing
    //
    //    s_add_u32 s2, s2, (extern_const_addrspace+16)
    //
    // and expecting a PCRel should instead produce
    //
    // .Ltmp1:
    //   s_add_u32 s2, s2, (extern_const_addrspace+16)-.Ltmp1
    MCFixupKind Kind;
    if (needsPCRel(MO.getExpr()))
      Kind = FK_PCRel_4;
    else
      Kind = FK_Data_4;

    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Offset = Desc.getSize();
    assert(Offset == 4 || Offset == 8);

    Fixups.push_back(
      MCFixup::create(Offset, MO.getExpr(), Kind, MI.getLoc()));
  }

  // Figure out the operand number, needed for isSrcOperand check
  unsigned OpNo = 0;
  for (unsigned e = MI.getNumOperands(); OpNo < e; ++OpNo) {
    if (&MO == &MI.getOperand(OpNo))
      break;
  }

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
    uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
    if (Enc != ~0U)
      return Enc;
  } else if (MO.isImm())
    return MO.getImm();

  llvm_unreachable("Encoding of this operand type is not supported yet.");
  return 0;
}

#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "AMDGPUGenMCCodeEmitter.inc"
564