1 //===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 /// \file
9 //===----------------------------------------------------------------------===//
10 
11 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
12 #include "MCTargetDesc/AMDGPUFixupKinds.h"
13 #include "llvm/ADT/StringRef.h"
14 #include "llvm/MC/MCAsmBackend.h"
15 #include "llvm/MC/MCAssembler.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCFixupKindInfo.h"
18 #include "llvm/MC/MCObjectWriter.h"
19 #include "llvm/MC/MCValue.h"
20 #include "llvm/Support/TargetRegistry.h"
21 
22 using namespace llvm;
23 
24 namespace {
25 
26 class AMDGPUAsmBackend : public MCAsmBackend {
27 public:
28   AMDGPUAsmBackend(const Target &T)
29     : MCAsmBackend() {}
30 
31   unsigned getNumFixupKinds() const override { return AMDGPU::NumTargetFixupKinds; };
32 
33   void processFixupValue(const MCAssembler &Asm,
34                          const MCAsmLayout &Layout,
35                          const MCFixup &Fixup, const MCFragment *DF,
36                          const MCValue &Target, uint64_t &Value,
37                          bool &IsResolved) override;
38 
39   void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
40                   uint64_t Value, bool IsPCRel) const override;
41   bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
42                             const MCRelaxableFragment *DF,
43                             const MCAsmLayout &Layout) const override {
44     return false;
45   }
46   void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
47                         MCInst &Res) const override {
48     assert(!"Not implemented");
49   }
50   bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }
51   bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
52 
53   const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
54 };
55 
56 } //End anonymous namespace
57 
58 static unsigned getFixupKindNumBytes(unsigned Kind) {
59   switch (Kind) {
60   case AMDGPU::fixup_si_sopp_br:
61     return 2;
62   case FK_SecRel_1:
63   case FK_Data_1:
64     return 1;
65   case FK_SecRel_2:
66   case FK_Data_2:
67     return 2;
68   case FK_SecRel_4:
69   case FK_Data_4:
70   case FK_PCRel_4:
71     return 4;
72   case FK_SecRel_8:
73   case FK_Data_8:
74     return 8;
75   default:
76     llvm_unreachable("Unknown fixup kind!");
77   }
78 }
79 
80 static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
81                                  MCContext *Ctx) {
82   int64_t SignedValue = static_cast<int64_t>(Value);
83 
84   switch (Fixup.getKind()) {
85   case AMDGPU::fixup_si_sopp_br: {
86     int64_t BrImm = (SignedValue - 4) / 4;
87 
88     if (Ctx && !isInt<16>(BrImm))
89       Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");
90 
91     return BrImm;
92   }
93   case FK_Data_1:
94   case FK_Data_2:
95   case FK_Data_4:
96   case FK_Data_8:
97   case FK_PCRel_4:
98   case FK_SecRel_4:
99     return Value;
100   default:
101     llvm_unreachable("unhandled fixup kind");
102   }
103 }
104 
// Resolve complex fixup expressions ourselves and apply the target-specific
// value adjustment.  Leaves IsResolved untouched (false) when the expression
// cannot be evaluated to an absolute value here.
void AMDGPUAsmBackend::processFixupValue(const MCAssembler &Asm,
                                         const MCAsmLayout &Layout,
                                         const MCFixup &Fixup, const MCFragment *DF,
                                         const MCValue &Target, uint64_t &Value,
                                         bool &IsResolved) {
  MCValue Res;

  // When we have complex expressions like: BB0_1 + (BB0_2 - 4), which are
  // used for long branches, this function will be called with
  // IsResolved = false and Value set to some pre-computed value.  In
  // the example above, the value would be:
  // (BB0_1 + (BB0_2 - 4)) - CurrentOffsetFromStartOfFunction.
  // This is not what we want.  We just want the expression computation
  // only.  The reason the MC layer subtracts the current offset from the
  // expression is because the fixup is of kind FK_PCRel_4.
  // For these scenarios, evaluateAsValue gives us the computation that we
  // want.
  if (!IsResolved && Fixup.getValue()->evaluateAsValue(Res, Layout) &&
      Res.isAbsolute()) {
    // Replace the MC layer's pre-computed (and wrong, see above) value with
    // the plain expression result.
    Value = Res.getConstant();
    IsResolved = true;

  }
  // Once resolved (either by us or by the MC layer), convert the value into
  // the instruction-field encoding; this may report an out-of-range error.
  if (IsResolved)
    Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
}
131 
132 void AMDGPUAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
133                                   unsigned DataSize, uint64_t Value,
134                                   bool IsPCRel) const {
135   if (!Value)
136     return; // Doesn't change encoding.
137 
138   MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
139 
140   // Shift the value into position.
141   Value <<= Info.TargetOffset;
142 
143   unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
144   uint32_t Offset = Fixup.getOffset();
145   assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");
146 
147   // For each byte of the fragment that the fixup touches, mask in the bits from
148   // the fixup value.
149   for (unsigned i = 0; i != NumBytes; ++i)
150     Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
151 }
152 
153 const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
154                                                        MCFixupKind Kind) const {
155   const static MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
156     // name                   offset bits  flags
157     { "fixup_si_sopp_br",     0,     16,   MCFixupKindInfo::FKF_IsPCRel },
158   };
159 
160   if (Kind < FirstTargetFixupKind)
161     return MCAsmBackend::getFixupKindInfo(Kind);
162 
163   return Infos[Kind - FirstTargetFixupKind];
164 }
165 
166 bool AMDGPUAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
167   OW->WriteZeros(Count);
168 
169   return true;
170 }
171 
172 //===----------------------------------------------------------------------===//
173 // ELFAMDGPUAsmBackend class
174 //===----------------------------------------------------------------------===//
175 
176 namespace {
177 
178 class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
179   bool Is64Bit;
180   bool HasRelocationAddend;
181 
182 public:
183   ELFAMDGPUAsmBackend(const Target &T, const Triple &TT) :
184       AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
185       HasRelocationAddend(TT.getOS() == Triple::AMDHSA) { }
186 
187   MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
188     return createAMDGPUELFObjectWriter(Is64Bit, HasRelocationAddend, OS);
189   }
190 };
191 
192 } // end anonymous namespace
193 
194 MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
195                                            const MCRegisterInfo &MRI,
196                                            const Triple &TT, StringRef CPU,
197                                            const MCTargetOptions &Options) {
198   // Use 64-bit ELF for amdgcn
199   return new ELFAMDGPUAsmBackend(T, TT);
200 }
201