//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;
using namespace llvm::AMDGPU;

namespace {

class AMDGPUAsmBackend : public MCAsmBackend {
public:
  AMDGPUAsmBackend(const Target &T) : MCAsmBackend(support::little) {}

  unsigned getNumFixupKinds() const override {
    return AMDGPU::NumTargetFixupKinds;
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override {
    return false;
  }
  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override {
    llvm_unreachable("Not implemented");
  }
  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override {
    return false;
  }

  unsigned getMinimumNopSize() const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
};

} // end anonymous namespace

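/// \returns the number of bytes in the encoded instruction stream that a
/// fixup of kind \p Kind patches.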
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  case AMDGPU::fixup_si_sopp_br:
    return 2;
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_SecRel_4:
  case FK_Data_4:
  case FK_PCRel_4:
    return 4;
  case FK_SecRel_8:
  case FK_Data_8:
    return 8;
  default:
    llvm_unreachable("Unknown fixup kind!");
  }
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext *Ctx) {
  int64_t SignedValue = static_cast<int64_t>(Value);

  switch (static_cast<unsigned>(Fixup.getKind())) {
  case AMDGPU::fixup_si_sopp_br: {
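    // SOPP branch targets are encoded as a signed 16-bit dword offset relative
    // to the end of the 4-byte branch instruction, hence the subtract-4 and
    // divide-by-4 below.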
    int64_t BrImm = (SignedValue - 4) / 4;

    if (Ctx && !isInt<16>(BrImm))
      Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");

    return BrImm;
  }
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_PCRel_4:
  case FK_SecRel_4:
    return Value;
  default:
    llvm_unreachable("unhandled fixup kind");
  }
}

void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                  const MCValue &Target,
                                  MutableArrayRef<char> Data, uint64_t Value,
                                  bool IsResolved,
                                  const MCSubtargetInfo *STI) const {
  Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  uint32_t Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value.
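  // For example, a fixup_si_sopp_br fixup has NumBytes == 2 and TargetOffset
  // == 0: it ORs the low and high bytes of the branch immediate into the two
  // bytes of the instruction's simm16 field.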
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
}

const MCFixupKindInfo &
AMDGPUAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
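  // This table must be in the same order as the enumerators of AMDGPU::Fixups
  // (see AMDGPUFixupKinds.h), since target fixup kinds are indexed relative
  // to FirstTargetFixupKind.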
  static const MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
    // name                   offset bits  flags
    { "fixup_si_sopp_br",     0,     16,   MCFixupKindInfo::FKF_IsPCRel },
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  return Infos[Kind - FirstTargetFixupKind];
}

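// The shortest nop we can emit is s_nop, which like all GCN instructions is
// at least 4 bytes, so alignment padding is handled in 4-byte units.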
unsigned AMDGPUAsmBackend::getMinimumNopSize() const {
  return 4;
}

bool AMDGPUAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;

  // FIXME: R600 support.
  // s_nop 0
  const uint32_t Encoded_S_NOP_0 = 0xbf800000;

  for (uint64_t I = 0; I != Count; ++I)
    support::endian::write<uint32_t>(OS, Encoded_S_NOP_0, Endian);

  return true;
}

//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//

namespace {

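/// ELF flavor of the AMDGPU asm backend. Derives the ELF class, OS ABI, and
/// relocation-addend behavior from the target triple and forwards them to the
/// object writer.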
class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
  bool Is64Bit;
  bool HasRelocationAddend;
  uint8_t OSABI = ELF::ELFOSABI_NONE;
  uint8_t ABIVersion = 0;

public:
  ELFAMDGPUAsmBackend(const Target &T, const Triple &TT, uint8_t ABIVersion) :
      AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
      HasRelocationAddend(TT.getOS() == Triple::AMDHSA),
      ABIVersion(ABIVersion) {
    switch (TT.getOS()) {
    case Triple::AMDHSA:
      OSABI = ELF::ELFOSABI_AMDGPU_HSA;
      break;
    case Triple::AMDPAL:
      OSABI = ELF::ELFOSABI_AMDGPU_PAL;
      break;
    case Triple::Mesa3D:
      OSABI = ELF::ELFOSABI_AMDGPU_MESA3D;
      break;
    default:
      break;
    }
  }

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAMDGPUELFObjectWriter(Is64Bit, OSABI, HasRelocationAddend,
                                       ABIVersion);
  }
};

} // end anonymous namespace

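// A sketch of how this entry point is typically hooked up (the actual
// registration lives in AMDGPUMCTargetDesc.cpp; TheTarget stands in for the
// registered Target instance):
//
//   TargetRegistry::RegisterMCAsmBackend(TheTarget, createAMDGPUAsmBackend);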
MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  // Use 64-bit ELF for amdgcn.
  return new ELFAMDGPUAsmBackend(T, STI.getTargetTriple(),
                                 IsaInfo::hasCodeObjectV3(&STI) ? 1 : 0);
}