//===-- SIMCCodeEmitter.cpp - SI Code Emitter -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The SI code emitter produces machine code that can be executed
/// directly on the GPU device.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"

using namespace llvm;

namespace {

class SIMCCodeEmitter : public AMDGPUMCCodeEmitter {
  const MCRegisterInfo &MRI;

  /// Encode an fp or int literal
  uint32_t getLitEncoding(const MCOperand &MO, const MCOperandInfo &OpInfo,
                          const MCSubtargetInfo &STI) const;

public:
  SIMCCodeEmitter(const MCInstrInfo &mcii, const MCRegisterInfo &mri,
                  MCContext &ctx)
      : AMDGPUMCCodeEmitter(mcii), MRI(mri) {}
  SIMCCodeEmitter(const SIMCCodeEmitter &) = delete;
  SIMCCodeEmitter &operator=(const SIMCCodeEmitter &) = delete;

  /// Encode the instruction and write it to the OS.
  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  /// \returns the encoding for an MCOperand.
  uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const override;

  /// Use a fixup to encode the simm16 field for SOPP branch
  /// instructions.
  unsigned getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const override;

  unsigned getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const override;

  unsigned getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const override;

  unsigned getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const override;

  unsigned getAVOperandEncoding(const MCInst &MI, unsigned OpNo,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const override;
};

} // end anonymous namespace

MCCodeEmitter *llvm::createSIMCCodeEmitter(const MCInstrInfo &MCII,
                                           const MCRegisterInfo &MRI,
                                           MCContext &Ctx) {
  return new SIMCCodeEmitter(MCII, MRI, Ctx);
}

// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
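// For example, 5 encodes as 133 (128 + 5) and -7 as 199 (192 + 7).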
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}

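// The getLit*Encoding helpers map a literal to its src-field encoding: an
// inline-constant code when the hardware has one, otherwise 255 to indicate
// that a 32-bit literal dword follows the instruction.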
static uint32_t getLit16IntEncoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  return IntImm == 0 ? 255 : IntImm;
}

static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == 0x3800) // 0.5
    return 240;

  if (Val == 0xB800) // -0.5
    return 241;

  if (Val == 0x3C00) // 1.0
    return 242;

  if (Val == 0xBC00) // -1.0
    return 243;

  if (Val == 0x4000) // 2.0
    return 244;

  if (Val == 0xC000) // -2.0
    return 245;

  if (Val == 0x4400) // 4.0
    return 246;

  if (Val == 0xC400) // -4.0
    return 247;

  if (Val == 0x3118 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == FloatToBits(0.5f))
    return 240;

  if (Val == FloatToBits(-0.5f))
    return 241;

  if (Val == FloatToBits(1.0f))
    return 242;

  if (Val == FloatToBits(-1.0f))
    return 243;

  if (Val == FloatToBits(2.0f))
    return 244;

  if (Val == FloatToBits(-2.0f))
    return 245;

  if (Val == FloatToBits(4.0f))
    return 246;

  if (Val == FloatToBits(-4.0f))
    return 247;

  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == DoubleToBits(0.5))
    return 240;

  if (Val == DoubleToBits(-0.5))
    return 241;

  if (Val == DoubleToBits(1.0))
    return 242;

  if (Val == DoubleToBits(-1.0))
    return 243;

  if (Val == DoubleToBits(2.0))
    return 244;

  if (Val == DoubleToBits(-2.0))
    return 245;

  if (Val == DoubleToBits(4.0))
    return 246;

  if (Val == DoubleToBits(-4.0))
    return 247;

  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

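// Pick the literal encoding for MO based on the operand type: an inline
// constant code (128-208 for integers, 240-248 for floats), 255 when the
// value must be emitted as a trailing literal, or ~0 for operands that
// cannot be encoded.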
uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
                                         const MCOperandInfo &OpInfo,
                                         const MCSubtargetInfo &STI) const {
  int64_t Imm;
  if (MO.isExpr()) {
    const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
    if (!C)
      return 255;

    Imm = C->getValue();
  } else {
    assert(!MO.isDFPImm());

    if (!MO.isImm())
      return ~0;

    Imm = MO.getImm();
  }

  switch (OpInfo.OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_IMM_V2INT32:
  case AMDGPU::OPERAND_REG_IMM_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
    return getLit32Encoding(static_cast<uint32_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
    return getLit64Encoding(static_cast<uint64_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
    return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
    // FIXME Is this correct? What do inline immediates do on SI for f16 src
    // which does not have f16 support?
    return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16: {
    if (!isUInt<16>(Imm) && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal])
      return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
    if (OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_V2FP16)
      return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
    LLVM_FALLTHROUGH;
  }
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
    return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: {
    uint16_t Lo16 = static_cast<uint16_t>(Imm);
    uint32_t Encoding = getLit16Encoding(Lo16, STI);
    return Encoding;
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}

void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  verifyInstructionPredicates(MI,
                              computeAvailableFeatures(STI.getFeatureBits()));

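  // getBinaryCodeForInstr is TableGen'erated and fills in the instruction
  // word(s); trailing literal dwords and NSA addresses are appended below.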
  uint64_t Encoding = getBinaryCodeForInstr(MI, Fixups, STI);
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  unsigned bytes = Desc.getSize();

  switch (MI.getOpcode()) {
  case AMDGPU::V_ACCVGPR_READ_B32_vi:
  case AMDGPU::V_ACCVGPR_WRITE_B32_vi:
    // Set unused op_sel_hi bits to 1.
    // FIXME: This should be done for all VOP3P (but not MAI) instructions
    // with unused op_sel_hi bits whenever the corresponding operands do not
    // exist. accvgpr_read/write are different, however: they are VOP3P and
    // MAI, have src0, but do not use op_sel.
    Encoding |= (1ull << 14) | (1ull << 59) | (1ull << 60);
    break;
  default:
    break;
  }

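  // Emit the encoded instruction in little-endian byte order.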
  for (unsigned i = 0; i < bytes; i++) {
    OS.write((uint8_t) ((Encoding >> (8 * i)) & 0xff));
  }

  // NSA (non-sequential address) encoding: on GFX10+ MIMG instructions, each
  // extra VGPR address is emitted as a single byte, padded out to a dword
  // boundary.
  if (AMDGPU::isGFX10Plus(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
    int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vaddr0);
    int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::srsrc);
    assert(vaddr0 >= 0 && srsrc > vaddr0);
    unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
    unsigned NumPadding = (-NumExtraAddrs) & 3;

    for (unsigned i = 0; i < NumExtraAddrs; ++i)
      OS.write((uint8_t)getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i),
                                          Fixups, STI));
    for (unsigned i = 0; i < NumPadding; ++i)
      OS.write(0);
  }

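  // A trailing 32-bit literal is only possible on instructions of at most
  // 4 bytes, or at most 8 bytes when VOP3 literals are supported; skip the
  // literal scan otherwise.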
  if ((bytes > 8 && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]) ||
      (bytes > 4 && !STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]))
    return;

  // Check for additional literals in SRC0/1/2 (Op 1/2/3)
  for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {

    // Check if this operand should be encoded as [SV]Src
    if (!AMDGPU::isSISrcOperand(Desc, i))
      continue;

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    if (getLitEncoding(Op, Desc.OpInfo[i], STI) != 255)
      continue;

    // Yes! Encode it
    int64_t Imm = 0;

    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      // Non-constant exprs are emitted as zero here and later replaced with
      // a fixup value.
      if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
        Imm = C->getValue();
    } else
      llvm_unreachable("Must be immediate or expr");

    for (unsigned j = 0; j < 4; j++) {
      OS.write((uint8_t) ((Imm >> (8 * j)) & 0xff));
    }

    // Only one literal value allowed
    break;
  }
}

unsigned SIMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isExpr()) {
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
    return 0;
  }

  return getMachineOpValue(MI, MO, Fixups, STI);
}

unsigned SIMCCodeEmitter::getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo,
                                                SmallVectorImpl<MCFixup> &Fixups,
                                                const MCSubtargetInfo &STI) const {
  auto Offset = MI.getOperand(OpNo).getImm();
  // VI only supports 20-bit unsigned offsets.
  assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
  return Offset;
}

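// SDWA sources use a 9-bit encoding: the low bits hold the VGPR number or an
// inline-constant code, and SRC_SGPR_MASK marks SGPRs and inline constants
// as opposed to VGPRs.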
unsigned
SIMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isReg()) {
    unsigned Reg = MO.getReg();
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
    if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI)) {
      RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
    }
    return RegEnc;
  } else {
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
    if (Enc != ~0U && Enc != 255) {
      return Enc | SDWA9EncValues::SRC_SGPR_MASK;
    }
  }

  llvm_unreachable("Unsupported operand kind");
  return 0;
}

unsigned
SIMCCodeEmitter::getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

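  // VCC is the implicit VOPC destination and encodes as zero; an explicit
  // SGPR destination sets VOPC_DST_VCC_MASK alongside its register encoding.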
  unsigned Reg = MO.getReg();
  if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
  }
  return RegEnc;
}

unsigned
SIMCCodeEmitter::getAVOperandEncoding(const MCInst &MI, unsigned OpNo,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  unsigned Reg = MI.getOperand(OpNo).getReg();
  uint64_t Enc = MRI.getEncodingValue(Reg);

  // VGPR and AGPR have the same encoding, but SrcA and SrcB operands of mfma
  // instructions use acc[0:1] modifier bits to distinguish. These bits are
  // encoded as a virtual 9th bit of the register for these operands.
  if (MRI.getRegClass(AMDGPU::AGPR_32RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_64RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_96RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_128RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_160RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_192RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_256RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AGPR_LO16RegClassID).contains(Reg))
    Enc |= 512;

  return Enc;
}

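// Returns true if fixups for this expression have to be PC-relative:
// constants, differences, and ABS32_LO/HI symbol references do not, while
// any other symbol reference does.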
static bool needsPCRel(const MCExpr *Expr) {
  switch (Expr->getKind()) {
  case MCExpr::SymbolRef: {
    auto *SE = cast<MCSymbolRefExpr>(Expr);
    MCSymbolRefExpr::VariantKind Kind = SE->getKind();
    return Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_LO &&
           Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_HI;
  }
  case MCExpr::Binary: {
    auto *BE = cast<MCBinaryExpr>(Expr);
    if (BE->getOpcode() == MCBinaryExpr::Sub)
      return false;
    return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
  }
  case MCExpr::Unary:
    return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
  case MCExpr::Target:
  case MCExpr::Constant:
    return false;
  }
  llvm_unreachable("invalid kind");
}

uint64_t SIMCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                            const MCOperand &MO,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  if (MO.isReg())
    return MRI.getEncodingValue(MO.getReg());

  if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
    // FIXME: Whether this expression is PCRel or not should not depend on
    // what the expression looks like. Given that this is just a general
    // expression, it should probably be FK_Data_4 and whatever is producing
    //
    //    s_add_u32 s2, s2, (extern_const_addrspace+16)
    //
    // and expecting a PCRel should instead produce
    //
    // .Ltmp1:
    //   s_add_u32 s2, s2, (extern_const_addrspace+16)-.Ltmp1
    MCFixupKind Kind;
    if (needsPCRel(MO.getExpr()))
      Kind = FK_PCRel_4;
    else
      Kind = FK_Data_4;

    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Offset = Desc.getSize();
    assert(Offset == 4 || Offset == 8);

    Fixups.push_back(
      MCFixup::create(Offset, MO.getExpr(), Kind, MI.getLoc()));
  }

  // Figure out the operand number, needed for isSrcOperand check
  unsigned OpNo = 0;
  for (unsigned e = MI.getNumOperands(); OpNo < e; ++OpNo) {
    if (&MO == &MI.getOperand(OpNo))
      break;
  }

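  // Source operands may encode an inline constant directly. The literal
  // marker (255) is only returned for 4- and 8-byte encodings, which are the
  // ones that can carry a trailing literal dword.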
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
    uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
    if (Enc != ~0U &&
        (Enc != 255 || Desc.getSize() == 4 || Desc.getSize() == 8))
      return Enc;

  } else if (MO.isImm())
    return MO.getImm();

  llvm_unreachable("Encoding of this operand type is not supported yet.");
  return 0;
}

#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "AMDGPUGenMCCodeEmitter.inc"