//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
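/// The AMDGPU assembler backend: resolves and applies target fixups and
/// provides a minimal object writer that emits raw section contents.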
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

namespace {

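/// A bare-bones object writer: emits each section's data verbatim and does
/// not support relocations.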
class AMDGPUMCObjectWriter : public MCObjectWriter {
public:
  AMDGPUMCObjectWriter(raw_pwrite_stream &OS)
      : MCObjectWriter(OS, /*IsLittleEndian=*/true) {}

  void executePostLayoutBinding(MCAssembler &Asm,
                                const MCAsmLayout &Layout) override {
    // XXX: Implement if necessary.
  }

  void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
                        const MCFragment *Fragment, const MCFixup &Fixup,
                        MCValue Target, bool &IsPCRel,
                        uint64_t &FixedValue) override {
    llvm_unreachable("Not implemented");
  }

  void writeObject(MCAssembler &Asm, const MCAsmLayout &Layout) override;
};

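/// The generic AMDGPU assembler backend: computes fixup sizes and values and
/// applies them to the instruction stream. Instruction relaxation is not
/// supported.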
class AMDGPUAsmBackend : public MCAsmBackend {
public:
  AMDGPUAsmBackend(const Target &T) : MCAsmBackend() {}

  unsigned getNumFixupKinds() const override {
    return AMDGPU::NumTargetFixupKinds;
  }

  void processFixupValue(const MCAssembler &Asm,
                         const MCAsmLayout &Layout,
                         const MCFixup &Fixup, const MCFragment *DF,
                         const MCValue &Target, uint64_t &Value,
                         bool &IsResolved) override;

  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
                  uint64_t Value, bool IsPCRel) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override {
    return false;
  }

  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override {
    llvm_unreachable("Not implemented");
  }

  bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }

  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
};

} // end anonymous namespace

void AMDGPUMCObjectWriter::writeObject(MCAssembler &Asm,
                                       const MCAsmLayout &Layout) {
  for (MCSection &Sec : Asm)
    Asm.writeSectionData(&Sec, Layout);
}

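// Number of bytes in the instruction stream patched by a fixup of the given
// kind.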
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  case AMDGPU::fixup_si_sopp_br:
    return 2;
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_SecRel_4:
  case FK_Data_4:
  case FK_PCRel_4:
    return 4;
  case FK_SecRel_8:
  case FK_Data_8:
    return 8;
  default:
    llvm_unreachable("Unknown fixup kind!");
  }
}

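// Convert the resolved fixup value into the form expected by the instruction
// encoding, reporting an error through \p Ctx (when provided) if the value
// does not fit.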
static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext *Ctx) {
  int64_t SignedValue = static_cast<int64_t>(Value);

  switch (Fixup.getKind()) {
  case AMDGPU::fixup_si_sopp_br: {
    // The branch operand is a signed dword offset relative to the instruction
    // following the branch, so convert the byte distance accordingly.
    int64_t BrImm = (SignedValue - 4) / 4;

    if (Ctx && !isInt<16>(BrImm))
      Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");

    return BrImm;
  }
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_PCRel_4:
  case FK_SecRel_4:
    return Value;
  default:
    llvm_unreachable("unhandled fixup kind");
  }
}

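// Resolved fixups are run through adjustFixupValue here purely for its range
// checking; the value actually written out is recomputed in applyFixup.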
void AMDGPUAsmBackend::processFixupValue(const MCAssembler &Asm,
                                         const MCAsmLayout &Layout,
                                         const MCFixup &Fixup,
                                         const MCFragment *DF,
                                         const MCValue &Target,
                                         uint64_t &Value, bool &IsResolved) {
  if (IsResolved)
    (void)adjustFixupValue(Fixup, Value, &Asm.getContext());
}

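// Patch the adjusted fixup value into the fragment data, ORing each of its
// bytes in at the fixup's offset.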
void AMDGPUAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
                                  unsigned DataSize, uint64_t Value,
                                  bool IsPCRel) const {
  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  if (!Value)
    return; // Doesn't change encoding.

  Value = adjustFixupValue(Fixup, Value, nullptr);

  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  uint32_t Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value.
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
}

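// Target-specific fixup descriptions; generic fixup kinds are delegated to the
// base MCAsmBackend implementation.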
const MCFixupKindInfo &
AMDGPUAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  static const MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
    // name                   offset bits  flags
    { "fixup_si_sopp_br",     0,     16,   MCFixupKindInfo::FKF_IsPCRel },
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  return Infos[Kind - FirstTargetFixupKind];
}

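// Nop padding is emitted as zero bytes rather than actual nop instructions.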
bool AMDGPUAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  OW->WriteZeros(Count);

  return true;
}

//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//

namespace {

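/// ELF-specific backend: amdgcn targets are emitted as 64-bit ELF, and AMDHSA
/// objects use relocations with explicit addends (RELA).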
class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
  bool Is64Bit;
  bool HasRelocationAddend;

public:
  ELFAMDGPUAsmBackend(const Target &T, const Triple &TT)
      : AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
        HasRelocationAddend(TT.getOS() == Triple::AMDHSA) {}

  MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
    return createAMDGPUELFObjectWriter(Is64Bit, HasRelocationAddend, OS);
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           const Triple &TT, StringRef CPU,
                                           const MCTargetOptions &Options) {
  // Use 64-bit ELF for amdgcn.
  return new ELFAMDGPUAsmBackend(T, TT);
}