//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
/// The AMDGPU MCAsmBackend: applies fixups to encoded instructions, provides
/// relaxation and NOP-padding hooks, and creates the ELF object writer.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

namespace {

class AMDGPUAsmBackend : public MCAsmBackend {
public:
  AMDGPUAsmBackend(const Target &T) : MCAsmBackend(support::little) {}

  unsigned getNumFixupKinds() const override {
    return AMDGPU::NumTargetFixupKinds;
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;
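  // AMDGPU does not relax instructions after encoding, so the relaxation
  // hooks below are stubs: queries report that nothing needs relaxing, and
  // relaxInstruction is unreachable.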
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override {
    return false;
  }
  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override {
    llvm_unreachable("Not implemented");
  }
  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override {
    return false;
  }

  unsigned getMinimumNopSize() const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
};

} // end anonymous namespace

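// Returns the number of bytes the fixup patches in the encoded fragment
// (e.g. the 16-bit SOPP branch immediate occupies 2 bytes).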
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  case AMDGPU::fixup_si_sopp_br:
    return 2;
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_SecRel_4:
  case FK_Data_4:
  case FK_PCRel_4:
    return 4;
  case FK_SecRel_8:
  case FK_Data_8:
    return 8;
  default:
    llvm_unreachable("Unknown fixup kind!");
  }
}

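// Converts a resolved fixup value into the raw bits the instruction field
// expects. The SOPP branch immediate counts dwords relative to the
// instruction following the 4-byte branch, hence the rebase by 4 and the
// division by 4: a target 68 bytes past the branch encodes as
// (68 - 4) / 4 = 16.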
static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext *Ctx) {
  int64_t SignedValue = static_cast<int64_t>(Value);

  switch (static_cast<unsigned>(Fixup.getKind())) {
  case AMDGPU::fixup_si_sopp_br: {
    int64_t BrImm = (SignedValue - 4) / 4;

    if (Ctx && !isInt<16>(BrImm))
      Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");

    return BrImm;
  }
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_PCRel_4:
  case FK_SecRel_4:
    return Value;
  default:
    llvm_unreachable("unhandled fixup kind");
  }
}

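// ORs the adjusted fixup value into the already-encoded instruction bytes.
// The value is emitted LSB-first, matching the little-endian byte order this
// backend was constructed with.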
void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                  const MCValue &Target,
                                  MutableArrayRef<char> Data, uint64_t Value,
                                  bool IsResolved,
                                  const MCSubtargetInfo *STI) const {
  Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  uint32_t Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value.
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
}

const MCFixupKindInfo &
AMDGPUAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  static const MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
    // name                   offset bits  flags
    { "fixup_si_sopp_br",     0,     16,   MCFixupKindInfo::FKF_IsPCRel },
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return Infos[Kind - FirstTargetFixupKind];
}

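// GCN instruction words are 4 or 8 bytes, so no real s_nop fits in fewer
// than 4 bytes.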
unsigned AMDGPUAsmBackend::getMinimumNopSize() const {
  return 4;
}

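// Example: a 10-byte request emits 2 zero bytes followed by two 4-byte
// s_nop instructions (10 = 2 + 2 * 4).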
bool AMDGPUAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;

  // FIXME: R600 support.
  // s_nop 0
  const uint32_t Encoded_S_NOP_0 = 0xbf800000;

  for (uint64_t I = 0; I != Count; ++I)
    support::endian::write<uint32_t>(OS, Encoded_S_NOP_0, Endian);

  return true;
}

//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//

namespace {

class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
  bool Is64Bit;
  bool HasRelocationAddend;
  uint8_t OSABI = ELF::ELFOSABI_NONE;

public:
  ELFAMDGPUAsmBackend(const Target &T, const Triple &TT) :
      AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
      HasRelocationAddend(TT.getOS() == Triple::AMDHSA) {
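    // Map the OS component of the triple to the corresponding AMDGPU ELF
    // OSABI; unrecognized OSes keep the ELFOSABI_NONE default.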
    switch (TT.getOS()) {
    case Triple::AMDHSA:
      OSABI = ELF::ELFOSABI_AMDGPU_HSA;
      break;
    case Triple::AMDPAL:
      OSABI = ELF::ELFOSABI_AMDGPU_PAL;
      break;
    case Triple::Mesa3D:
      OSABI = ELF::ELFOSABI_AMDGPU_MESA3D;
      break;
    default:
      break;
    }
  }

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAMDGPUELFObjectWriter(Is64Bit, OSABI, HasRelocationAddend);
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  // Use 64-bit ELF for amdgcn.
  return new ELFAMDGPUAsmBackend(T, STI.getTargetTriple());
}