//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

namespace {

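/// Generic AMDGPU assembler backend: applies target fixups to the encoded
/// instruction stream and emits s_nop padding. The ELF-specific backend
/// below derives from this class.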
class AMDGPUAsmBackend : public MCAsmBackend {
public:
  AMDGPUAsmBackend(const Target &T)
    : MCAsmBackend() {}

  unsigned getNumFixupKinds() const override {
    return AMDGPU::NumTargetFixupKinds;
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved) const override;
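  // AMDGPU never relaxes instructions: mayNeedRelaxation always returns
  // false, so relaxInstruction should be unreachable.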
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override {
    return false;
  }
  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override {
    llvm_unreachable("Not implemented");
  }
  bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }

  unsigned getMinimumNopSize() const override;
  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
};

} // end anonymous namespace

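/// Returns the number of bytes in the instruction stream patched by the
/// given fixup kind.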
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  case AMDGPU::fixup_si_sopp_br:
    return 2;
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_SecRel_4:
  case FK_Data_4:
  case FK_PCRel_4:
    return 4;
  case FK_SecRel_8:
  case FK_Data_8:
    return 8;
  default:
    llvm_unreachable("Unknown fixup kind!");
  }
}

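/// Converts the resolved fixup value into the raw bits to patch into the
/// fixup's field, reporting out-of-range values through \p Ctx when a
/// context is provided.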
static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext *Ctx) {
  int64_t SignedValue = static_cast<int64_t>(Value);

  switch (static_cast<unsigned>(Fixup.getKind())) {
  case AMDGPU::fixup_si_sopp_br: {
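    // The branch offset is relative to the instruction following the branch
    // (PC + 4) and is counted in dwords, hence the subtraction and the
    // division by 4.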
    int64_t BrImm = (SignedValue - 4) / 4;

    if (Ctx && !isInt<16>(BrImm))
      Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");

    return BrImm;
  }
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_PCRel_4:
  case FK_SecRel_4:
    return Value;
  default:
    llvm_unreachable("unhandled fixup kind");
  }
}

void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                  const MCValue &Target,
                                  MutableArrayRef<char> Data, uint64_t Value,
                                  bool IsResolved) const {
  Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  uint32_t Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value.
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
}

const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
                                                       MCFixupKind Kind) const {
  static const MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
    // name                   offset bits  flags
    { "fixup_si_sopp_br",     0,     16,   MCFixupKindInfo::FKF_IsPCRel },
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  return Infos[Kind - FirstTargetFixupKind];
}

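// GCN instruction encodings are a multiple of 32 bits, so any padding
// emitted into a text section must be at least one dword wide.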
unsigned AMDGPUAsmBackend::getMinimumNopSize() const {
  return 4;
}

bool AMDGPUAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OW->WriteZeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;

  // FIXME: R600 support.
  // s_nop 0
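  // (0xbf800000 is the SOPP-format encoding of s_nop with a simm16 operand
  // of 0.)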
  const uint32_t Encoded_S_NOP_0 = 0xbf800000;

  for (uint64_t I = 0; I != Count; ++I)
    OW->write32(Encoded_S_NOP_0);

  return true;
}

//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//

namespace {

class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
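  // amdgcn targets use a 64-bit ELF container, and the AMDHSA OS ABI uses
  // RELA (explicit-addend) relocations.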
  bool Is64Bit;
  bool HasRelocationAddend;

public:
  ELFAMDGPUAsmBackend(const Target &T, const Triple &TT) :
      AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
      HasRelocationAddend(TT.getOS() == Triple::AMDHSA) { }

  MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
    return createAMDGPUELFObjectWriter(Is64Bit, HasRelocationAddend, OS);
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           const Triple &TT, StringRef CPU,
                                           const MCTargetOptions &Options) {
  // Use 64-bit ELF for amdgcn.
  return new ELFAMDGPUAsmBackend(T, TT);
}