//===-- SIMCCodeEmitter.cpp - SI Code Emitter -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief The SI code emitter produces machine code that can be executed
/// directly on the GPU device.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {

class SIMCCodeEmitter : public AMDGPUMCCodeEmitter {
  SIMCCodeEmitter(const SIMCCodeEmitter &) = delete;
  void operator=(const SIMCCodeEmitter &) = delete;
  const MCRegisterInfo &MRI;

  /// \brief Encode an fp or int literal
  uint32_t getLitEncoding(const MCOperand &MO, unsigned OpSize,
                          const MCSubtargetInfo &STI) const;

public:
  SIMCCodeEmitter(const MCInstrInfo &mcii, const MCRegisterInfo &mri,
                  MCContext &ctx)
      : AMDGPUMCCodeEmitter(mcii), MRI(mri) {}

  ~SIMCCodeEmitter() override {}

  /// \brief Encode the instruction and write it to the OS.
  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  /// \returns the encoding for an MCOperand.
  uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const override;

  /// \brief Use a fixup to encode the simm16 field for SOPP branch
  ///        instructions.
  unsigned getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const override;
};

} // End anonymous namespace

MCCodeEmitter *llvm::createSIMCCodeEmitter(const MCInstrInfo &MCII,
                                           const MCRegisterInfo &MRI,
                                           MCContext &Ctx) {
  return new SIMCCodeEmitter(MCII, MRI, Ctx);
}

// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
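  // Non-negative inline integers 0..64 are encoded as 128..192.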
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

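  // Negative inline integers -1..-16 are encoded as 193..208.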
  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}

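// Returns the operand encoding for a 32-bit value: an inline constant
// encoding if Val matches one of the hardware inline constants, otherwise
// 255 to indicate that a 32-bit literal follows the instruction.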
static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == FloatToBits(0.5f))
    return 240;

  if (Val == FloatToBits(-0.5f))
    return 241;

  if (Val == FloatToBits(1.0f))
    return 242;

  if (Val == FloatToBits(-1.0f))
    return 243;

  if (Val == FloatToBits(2.0f))
    return 244;

  if (Val == FloatToBits(-2.0f))
    return 245;

  if (Val == FloatToBits(4.0f))
    return 246;

  if (Val == FloatToBits(-4.0f))
    return 247;

  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

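// Same as getLit32Encoding, but matching the 64-bit (double) bit patterns of
// the hardware inline constants.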
static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == DoubleToBits(0.5))
    return 240;

  if (Val == DoubleToBits(-0.5))
    return 241;

  if (Val == DoubleToBits(1.0))
    return 242;

  if (Val == DoubleToBits(-1.0))
    return 243;

  if (Val == DoubleToBits(2.0))
    return 244;

  if (Val == DoubleToBits(-2.0))
    return 245;

  if (Val == DoubleToBits(4.0))
    return 246;

  if (Val == DoubleToBits(-4.0))
    return 247;

  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

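// OpSize is the operand size in bytes: 4 selects the 32-bit literal encoding,
// 8 the 64-bit one.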
uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
                                         unsigned OpSize,
                                         const MCSubtargetInfo &STI) const {

  int64_t Imm;
  if (MO.isExpr()) {
    const MCConstantExpr *C = dyn_cast<MCConstantExpr>(MO.getExpr());
    if (!C)
      return 255;

    Imm = C->getValue();
  } else {

    assert(!MO.isFPImm());

    if (!MO.isImm())
      return ~0;

    Imm = MO.getImm();
  }

  if (OpSize == 4)
    return getLit32Encoding(static_cast<uint32_t>(Imm), STI);

  assert(OpSize == 8);

  return getLit64Encoding(static_cast<uint64_t>(Imm), STI);
}

void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  verifyInstructionPredicates(MI,
                              computeAvailableFeatures(STI.getFeatureBits()));

  uint64_t Encoding = getBinaryCodeForInstr(MI, Fixups, STI);
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  unsigned bytes = Desc.getSize();

  for (unsigned i = 0; i < bytes; i++) {
    OS.write((uint8_t) ((Encoding >> (8 * i)) & 0xff));
  }

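  // Literal constants are only checked for on 4-byte encoded instructions;
  // wider encodings do not get a trailing literal dword emitted here.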
  if (bytes > 4)
    return;

  // Check for additional literals in SRC0/1/2 (Op 1/2/3)
  for (unsigned i = 0, e = MI.getNumOperands(); i < e; ++i) {

    // Check if this operand should be encoded as [SV]Src
    if (!AMDGPU::isSISrcOperand(Desc, i))
      continue;

    int RCID = Desc.OpInfo[i].RegClass;
    const MCRegisterClass &RC = MRI.getRegClass(RCID);

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    if (getLitEncoding(Op, AMDGPU::getRegBitWidth(RC) / 8, STI) != 255)
      continue;

    // Yes! Encode it
    int64_t Imm = 0;

    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      // Non-constant exprs will be replaced with a fixup value.
      if (const MCConstantExpr *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
        Imm = C->getValue();
    } else
      llvm_unreachable("Must be immediate or expr");

    for (unsigned j = 0; j < 4; j++) {
      OS.write((uint8_t) ((Imm >> (8 * j)) & 0xff));
    }

    // Only one literal value allowed
    break;
  }
}

unsigned SIMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isExpr()) {
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
    return 0;
  }

  return getMachineOpValue(MI, MO, Fixups, STI);
}

uint64_t SIMCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                            const MCOperand &MO,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  if (MO.isReg())
    return MRI.getEncodingValue(MO.getReg());

  if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
    const MCSymbolRefExpr *Expr = dyn_cast<MCSymbolRefExpr>(MO.getExpr());
    MCFixupKind Kind;
    if (Expr && Expr->getSymbol().isExternal())
      Kind = FK_Data_4;
    else
      Kind = FK_PCRel_4;
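    // The fixup patches the 32-bit literal that is emitted after the first
    // instruction dword, hence the offset of 4.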
    Fixups.push_back(MCFixup::create(4, MO.getExpr(), Kind, MI.getLoc()));
  }

  // Figure out the operand number, needed for isSrcOperand check
  unsigned OpNo = 0;
  for (unsigned e = MI.getNumOperands(); OpNo < e; ++OpNo) {
    if (&MO == &MI.getOperand(OpNo))
      break;
  }

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
    uint32_t Enc = getLitEncoding(MO,
                                  AMDGPU::getRegOperandSize(&MRI, Desc, OpNo),
                                  STI);
    if (Enc != ~0U && (Enc != 255 || Desc.getSize() == 4))
      return Enc;

  } else if (MO.isImm())
    return MO.getImm();

  llvm_unreachable("Encoding of this operand type is not supported yet.");
  return 0;
}