//===-- SIMCCodeEmitter.cpp - SI Code Emitter -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The SI code emitter produces machine code that can be executed
/// directly on the GPU device.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

namespace {

class SIMCCodeEmitter : public AMDGPUMCCodeEmitter {
  const MCRegisterInfo &MRI;

  /// Encode an fp or int literal.
  uint32_t getLitEncoding(const MCOperand &MO, const MCOperandInfo &OpInfo,
                          const MCSubtargetInfo &STI) const;

public:
  SIMCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
      : AMDGPUMCCodeEmitter(mcii), MRI(*ctx.getRegisterInfo()) {}
  SIMCCodeEmitter(const SIMCCodeEmitter &) = delete;
  SIMCCodeEmitter &operator=(const SIMCCodeEmitter &) = delete;

  /// Encode the instruction and write it to the OS.
  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  /// \returns the encoding for an MCOperand.
  void getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  /// Use a fixup to encode the simm16 field for SOPP branch
  /// instructions.
  void getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  void getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const override;

  void getSDWASrcEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const override;

  void getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const override;

  void getAVOperandEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const override;

private:
  uint64_t getImplicitOpSelHiEncoding(int Opcode) const;
  void getMachineOpValueCommon(const MCInst &MI, const MCOperand &MO,
                               unsigned OpNo, APInt &Op,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;
};

} // end anonymous namespace

MCCodeEmitter *llvm::createSIMCCodeEmitter(const MCInstrInfo &MCII,
                                           MCContext &Ctx) {
  return new SIMCCodeEmitter(MCII, Ctx);
}

// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
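// Inline integers 0..64 map to operand encodings 128..192 (128 + Imm), and
// -1..-16 map to 193..208 (192 + |Imm|), as the cases below show. Returning 0
// for "not inline" is unambiguous because the integer 0 itself encodes as 128.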
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}

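// For 16-bit integer operands there are no fp inline constants to match, so
// anything that is not an inline integer gets 255, the encoding that marks a
// literal following the instruction.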
static uint32_t getLit16IntEncoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  return IntImm == 0 ? 255 : IntImm;
}

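// fp16 operands can additionally use the hardware fp inline constants:
// encodings 240..247 cover +/-0.5, +/-1.0, +/-2.0 and +/-4.0 (e.g. 0x3800,
// IEEE half 0.5, encodes as 240), and 248 covers 1/(2*pi) on subtargets with
// FeatureInv2PiInlineImm.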
static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == 0x3800) // 0.5
    return 240;

  if (Val == 0xB800) // -0.5
    return 241;

  if (Val == 0x3C00) // 1.0
    return 242;

  if (Val == 0xBC00) // -1.0
    return 243;

  if (Val == 0x4000) // 2.0
    return 244;

  if (Val == 0xC000) // -2.0
    return 245;

  if (Val == 0x4400) // 4.0
    return 246;

  if (Val == 0xC400) // -4.0
    return 247;

  if (Val == 0x3118 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

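// The 32- and 64-bit variants below follow the same scheme, matching against
// single- and double-precision bit patterns instead.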
static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == FloatToBits(0.5f))
    return 240;

  if (Val == FloatToBits(-0.5f))
    return 241;

  if (Val == FloatToBits(1.0f))
    return 242;

  if (Val == FloatToBits(-1.0f))
    return 243;

  if (Val == FloatToBits(2.0f))
    return 244;

  if (Val == FloatToBits(-2.0f))
    return 245;

  if (Val == FloatToBits(4.0f))
    return 246;

  if (Val == FloatToBits(-4.0f))
    return 247;

  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == DoubleToBits(0.5))
    return 240;

  if (Val == DoubleToBits(-0.5))
    return 241;

  if (Val == DoubleToBits(1.0))
    return 242;

  if (Val == DoubleToBits(-1.0))
    return 243;

  if (Val == DoubleToBits(2.0))
    return 244;

  if (Val == DoubleToBits(-2.0))
    return 245;

  if (Val == DoubleToBits(4.0))
    return 246;

  if (Val == DoubleToBits(-4.0))
    return 247;

  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
                                         const MCOperandInfo &OpInfo,
                                         const MCSubtargetInfo &STI) const {
  int64_t Imm;
  if (MO.isExpr()) {
    const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
    if (!C)
      return 255;

    Imm = C->getValue();
  } else {
    assert(!MO.isDFPImm());

    if (!MO.isImm())
      return ~0;

    Imm = MO.getImm();
  }

  switch (OpInfo.OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_IMM_V2INT32:
  case AMDGPU::OPERAND_REG_IMM_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
    return getLit32Encoding(static_cast<uint32_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
    return getLit64Encoding(static_cast<uint64_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
    return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
    // FIXME Is this correct? What do inline immediates do on SI for f16 src
    // which does not have f16 support?
    return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16: {
    if (!isUInt<16>(Imm) && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal])
      return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
    if (OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_V2FP16)
      return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
    LLVM_FALLTHROUGH;
  }
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
    return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: {
    uint16_t Lo16 = static_cast<uint16_t>(Imm);
    uint32_t Encoding = getLit16Encoding(Lo16, STI);
    return Encoding;
  }
  case AMDGPU::OPERAND_KIMM32:
  case AMDGPU::OPERAND_KIMM16:
    return MO.getImm();
  default:
    llvm_unreachable("invalid operand size");
  }
}

uint64_t SIMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
  using namespace AMDGPU::VOP3PEncoding;
  using namespace AMDGPU::OpName;

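  // When the instruction has an op_sel_hi operand, the bits for the sources
  // it actually has are encoded explicitly; this returns only the bits to
  // force to 1 for the sources it lacks. Without an op_sel_hi operand at all,
  // every bit defaults to 1.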
  if (AMDGPU::getNamedOperandIdx(Opcode, op_sel_hi) != -1) {
    if (AMDGPU::getNamedOperandIdx(Opcode, src2) != -1)
      return 0;
    if (AMDGPU::getNamedOperandIdx(Opcode, src1) != -1)
      return OP_SEL_HI_2;
    if (AMDGPU::getNamedOperandIdx(Opcode, src0) != -1)
      return OP_SEL_HI_1 | OP_SEL_HI_2;
  }
  return OP_SEL_HI_0 | OP_SEL_HI_1 | OP_SEL_HI_2;
}

static bool isVCMPX64(const MCInstrDesc &Desc) {
  return (Desc.TSFlags & SIInstrFlags::VOP3) &&
         Desc.hasImplicitDefOfPhysReg(AMDGPU::EXEC);
}

void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  int Opcode = MI.getOpcode();
  APInt Encoding, Scratch;
  getBinaryCodeForInstr(MI, Fixups, Encoding, Scratch, STI);
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  unsigned bytes = Desc.getSize();

  // Set unused op_sel_hi bits to 1 for VOP3P and MAI instructions.
  // Note that accvgpr_read/write are MAI, have src0, but do not use op_sel.
  if ((Desc.TSFlags & SIInstrFlags::VOP3P) ||
      Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
      Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) {
    Encoding |= getImplicitOpSelHiEncoding(Opcode);
  }

  // GFX11 v_cmpx opcodes promoted to VOP3 have an implied dst=EXEC.
  // The documentation requires dst to be encoded as EXEC (0x7E), but the
  // value actually encoded for dst appears to be ignored by the hardware, so
  // dst is defined as "do not care" in the td files to let the disassembler
  // accept any dst value. It is nonetheless encoded as EXEC here for
  // compatibility with SP3.
  if (AMDGPU::isGFX11Plus(STI) && isVCMPX64(Desc)) {
    assert((Encoding & 0xFF) == 0);
    Encoding |= MRI.getEncodingValue(AMDGPU::EXEC_LO);
  }

  for (unsigned i = 0; i < bytes; i++) {
    OS.write((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));
  }

  // NSA encoding.
  if (AMDGPU::isGFX10Plus(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
    int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vaddr0);
    int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::srsrc);
    assert(vaddr0 >= 0 && srsrc > vaddr0);
    unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
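    // Each extra NSA address is encoded as a single byte; pad with zeros up
    // to a dword boundary, presumably to keep the instruction stream aligned.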
    unsigned NumPadding = (-NumExtraAddrs) & 3;

    for (unsigned i = 0; i < NumExtraAddrs; ++i) {
      getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
                        STI);
      OS.write((uint8_t)Encoding.getLimitedValue());
    }
    for (unsigned i = 0; i < NumPadding; ++i)
      OS.write(0);
  }

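  // A trailing 32-bit literal can only follow a 4-byte encoding or, on
  // subtargets with FeatureVOP3Literal, an 8-byte one; larger encodings
  // already carry their literal in the bytes written above.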
  if ((bytes > 8 && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]) ||
      (bytes > 4 && !STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]))
    return;

  // Do not emit literals for SISrc operands of instructions with a mandatory
  // literal: that literal is already part of the encoding written above.
  int ImmLitIdx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::imm);
  if (ImmLitIdx != -1)
    return;

  // Check for additional literals.
  for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {
    // Check if this operand should be encoded as [SV]Src.
    if (!AMDGPU::isSISrcOperand(Desc, i))
      continue;

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    if (getLitEncoding(Op, Desc.OpInfo[i], STI) != 255)
      continue;

    // Yes! Encode it.
    int64_t Imm = 0;

    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      // Non-constant exprs are left as zero; a fixup supplies the value.
      if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
        Imm = C->getValue();
    } else
      llvm_unreachable("Must be immediate or expr");

    for (unsigned j = 0; j < 4; j++) {
      OS.write((uint8_t)((Imm >> (8 * j)) & 0xff));
    }

    // Only one literal value is allowed per instruction.
    break;
  }
}

void SIMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                        APInt &Op,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isExpr()) {
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
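    // Leave the field zeroed; the fixup recorded above fills in the branch
    // offset once it is resolved.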
    Op = APInt::getNullValue(96);
  } else {
    getMachineOpValue(MI, MO, Op, Fixups, STI);
  }
}

void SIMCCodeEmitter::getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo,
                                            APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  auto Offset = MI.getOperand(OpNo).getImm();
  // VI only supports 20-bit unsigned offsets.
  assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
  Op = Offset;
}

void SIMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                                         APInt &Op,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isReg()) {
    unsigned Reg = MO.getReg();
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
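    // SDWA9 distinguishes SGPR from VGPR sources with an extra encoding bit;
    // VGPRs use the plain masked register encoding set above.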
    if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI)) {
      RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
    }
    Op = RegEnc;
    return;
  } else {
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
    if (Enc != ~0U && Enc != 255) {
      Op = Enc | SDWA9EncValues::SRC_SGPR_MASK;
      return;
    }
  }

  llvm_unreachable("Unsupported operand kind");
}

void SIMCCodeEmitter::getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo,
                                             APInt &Op,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

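  // VCC is the implicit VOPC destination and encodes as zero; any other SGPR
  // destination needs its register encoding plus the VOPC_DST_VCC_MASK bit.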
  unsigned Reg = MO.getReg();
  if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
  }
  Op = RegEnc;
}

void SIMCCodeEmitter::getAVOperandEncoding(const MCInst &MI, unsigned OpNo,
                                           APInt &Op,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  unsigned Reg = MI.getOperand(OpNo).getReg();
  uint64_t Enc = MRI.getEncodingValue(Reg);

  // VGPR and AGPR have the same encoding, but SrcA and SrcB operands of mfma
  // instructions use acc[0:1] modifier bits to distinguish. These bits are
  // encoded as a virtual 9th bit of the register for these operands.
  if (MRI.getRegClass(AMDGPU::AGPR_32RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_64RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_96RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_128RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_160RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_192RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_224RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_256RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_512RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AGPR_LO16RegClassID).contains(Reg))
    Enc |= 512;

  Op = Enc;
}

static bool needsPCRel(const MCExpr *Expr) {
  switch (Expr->getKind()) {
  case MCExpr::SymbolRef: {
    auto *SE = cast<MCSymbolRefExpr>(Expr);
    MCSymbolRefExpr::VariantKind Kind = SE->getKind();
    return Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_LO &&
           Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_HI;
  }
  case MCExpr::Binary: {
    auto *BE = cast<MCBinaryExpr>(Expr);
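    // A difference of two symbols (lhs - rhs) is a link-time constant, so it
    // never needs PC-relative treatment.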
    if (BE->getOpcode() == MCBinaryExpr::Sub)
      return false;
    return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
  }
  case MCExpr::Unary:
    return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
  case MCExpr::Target:
  case MCExpr::Constant:
    return false;
  }
  llvm_unreachable("invalid kind");
}

void SIMCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                        const MCOperand &MO, APInt &Op,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  if (MO.isReg()) {
    Op = MRI.getEncodingValue(MO.getReg());
    return;
  }
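  // Recover the operand index from the operand's address within the
  // instruction's operand list.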
  unsigned OpNo = &MO - MI.begin();
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}

void SIMCCodeEmitter::getMachineOpValueCommon(
    const MCInst &MI, const MCOperand &MO, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {

  if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
    // FIXME: Whether this expression is PC-relative should not depend on what
    // the expression looks like. Given that this is just a general expression,
    // it should probably be FK_Data_4 and whatever is producing
    //
    //    s_add_u32 s2, s2, (extern_const_addrspace+16)
    //
    // and expecting a PCRel should instead produce
    //
    //    .Ltmp1:
    //    s_add_u32 s2, s2, (extern_const_addrspace+16)-.Ltmp1
    MCFixupKind Kind;
    if (needsPCRel(MO.getExpr()))
      Kind = FK_PCRel_4;
    else
      Kind = FK_Data_4;

    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Offset = Desc.getSize();
    assert(Offset == 4 || Offset == 8);

    Fixups.push_back(MCFixup::create(Offset, MO.getExpr(), Kind, MI.getLoc()));
  }

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
    uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
    if (Enc != ~0U) {
      Op = Enc;
      return;
    }
  } else if (MO.isImm()) {
    Op = MO.getImm();
    return;
  }

  llvm_unreachable("Encoding of this operand type is not supported yet.");
}

#include "AMDGPUGenMCCodeEmitter.inc"