//===- Target.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLD_ELF_TARGET_H
#define LLD_ELF_TARGET_H

#include "InputSection.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/Object/ELF.h"
#include "llvm/Support/MathExtras.h"
#include <array>

namespace lld {
std::string toString(elf::RelType type);

namespace elf {
class Defined;
class InputFile;
class Symbol;

class TargetInfo {
public:
  virtual uint32_t calcEFlags() const { return 0; }
  virtual RelExpr getRelExpr(RelType type, const Symbol &s,
                             const uint8_t *loc) const = 0;
  virtual RelType getDynRel(RelType type) const { return 0; }
  virtual void writeGotPltHeader(uint8_t *buf) const {}
  virtual void writeGotHeader(uint8_t *buf) const {}
  virtual void writeGotPlt(uint8_t *buf, const Symbol &s) const {}
  virtual void writeIgotPlt(uint8_t *buf, const Symbol &s) const {}
  virtual int64_t getImplicitAddend(const uint8_t *buf, RelType type) const;
  virtual int getTlsGdRelaxSkip(RelType type) const { return 1; }

  // If lazy binding is supported, the first entry of the PLT has code
  // to call the dynamic linker to resolve PLT entries the first time
  // they are called. This function writes that code.
  virtual void writePltHeader(uint8_t *buf) const {}

  virtual void writePlt(uint8_t *buf, const Symbol &sym,
                        uint64_t pltEntryAddr) const {}
  virtual void writeIplt(uint8_t *buf, const Symbol &sym,
                         uint64_t pltEntryAddr) const {
    // All but PPC32 and PPC64 use the same format for .plt and .iplt entries.
    writePlt(buf, sym, pltEntryAddr);
  }
  virtual void writeIBTPlt(uint8_t *buf, size_t numEntries) const {}
  virtual void addPltHeaderSymbols(InputSection &isec) const {}
  virtual void addPltSymbols(InputSection &isec, uint64_t off) const {}

  // Returns true if a relocation only uses the low bits of a value such that
  // all those bits are in the same page. For example, a relocation that only
  // uses the low 12 bits on a system with 4k pages. If this is true, the bits
  // will always have the same value at runtime and we don't have to emit
  // a dynamic relocation.
  virtual bool usesOnlyLowPageBits(RelType type) const;

  // Decide whether a Thunk is needed for the relocation from File
  // targeting S.
  virtual bool needsThunk(RelExpr expr, RelType relocType,
                          const InputFile *file, uint64_t branchAddr,
                          const Symbol &s, int64_t a) const;

  // On systems with range extensions we place collections of Thunks at
  // regular spacings that enable the majority of branches to reach the
  // Thunks. A value of 0 means range extension thunks are not supported.
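  // (For example, Thumb-2 branch instructions can reach roughly +/-16 MiB,
  // so the spacing must stay within the most restrictive branch range in
  // use.)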
  virtual uint32_t getThunkSectionSpacing() const { return 0; }

  // The function with a prologue starting at Loc was compiled with
  // -fsplit-stack and it calls a function compiled without. Adjust the
  // prologue to do the right thing. See https://gcc.gnu.org/wiki/SplitStacks.
  // The symbol's st_other flags are needed on PowerPC64 for determining the
  // offset to the split-stack prologue.
  virtual bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
                                                uint8_t stOther) const;

  // Return true if we can reach dst from src with RelType type.
  virtual bool inBranchRange(RelType type, uint64_t src,
                             uint64_t dst) const;

  virtual void relocate(uint8_t *loc, const Relocation &rel,
                        uint64_t val) const = 0;
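  // Relocate a location where no real Relocation is available (e.g. for the
  // contents of synthetic sections); wraps relocate() with a dummy R_NONE
  // relocation carrying only the type.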
  void relocateNoSym(uint8_t *loc, RelType type, uint64_t val) const {
    relocate(loc, Relocation{R_NONE, type, 0, 0, nullptr}, val);
  }

  virtual void applyJumpInstrMod(uint8_t *loc, JumpModType type,
                                 JumpModType val) const {}

  virtual ~TargetInfo();

  // This deletes a jump insn at the end of the section if it is a
  // fall-through to the next section. Further, if there is a conditional
  // jump followed immediately by a direct jump, it tries to flip the
  // conditional jump to convert the direct jump into a fall-through and
  // delete it. Returns true if a jump instruction can be deleted.
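  // A sketch of the second case on x86-64: at the end of a section,
  //   jne <next section start>; jmp <target>
  // can be rewritten as
  //   je <target>
  // so the direct jump becomes a fall-through and is deleted.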
  virtual bool deleteFallThruJmpInsn(InputSection &is, InputFile *file,
                                     InputSection *nextIS) const {
    return false;
  }

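  // Default common and maximum page sizes for this target; targets with
  // larger pages (for example AArch64 and PPC64) override these in their
  // constructors.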
  unsigned defaultCommonPageSize = 4096;
  unsigned defaultMaxPageSize = 4096;

  uint64_t getImageBase() const;

  // True if _GLOBAL_OFFSET_TABLE_ is relative to .got.plt, false if .got.
  bool gotBaseSymInGotPlt = true;

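  // Dynamic relocation types used by this target for copy relocations, GOT,
  // PLT, and TLS entries; each target's constructor fills these in (for
  // example, on x86-64 pltRel is R_X86_64_JUMP_SLOT and relativeRel is
  // R_X86_64_RELATIVE).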
  RelType copyRel;
  RelType gotRel;
  RelType noneRel;
  RelType pltRel;
  RelType relativeRel;
  RelType iRelativeRel;
  RelType symbolicRel;
  RelType tlsDescRel;
  RelType tlsGotRel;
  RelType tlsModuleIndexRel;
  RelType tlsOffsetRel;
  unsigned gotEntrySize = config->wordsize;
  unsigned pltEntrySize;
  unsigned pltHeaderSize;
  unsigned ipltEntrySize;

  // At least on x86_64, positions 1 and 2 are used by the first PLT entry
  // to support lazy loading.
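  // (Entry 0 conventionally holds the address of _DYNAMIC; the dynamic
  // loader fills in entries 1 and 2 at load time.)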
  unsigned gotPltHeaderEntriesNum = 3;

  // On the PPC64 ELFv2 ABI, the first entry in the .got is the .TOC.
  unsigned gotHeaderEntriesNum = 0;

  bool needsThunks = false;

  // A 4-byte field corresponding to one or more trap instructions, used to pad
  // executable OutputSections.
  std::array<uint8_t, 4> trapInstr;

  // Stores the NOP instructions of different sizes for the target and is used
  // to pad sections that are relaxed.
  llvm::Optional<std::vector<std::vector<uint8_t>>> nopInstrs;

  // If a target needs to rewrite calls to __morestack to instead call
  // __morestack_non_split when a split-stack-enabled caller calls a
  // non-split-stack callee, this is true. Otherwise it is false.
  bool needsMoreStackNonSplit = true;

  virtual RelExpr adjustTlsExpr(RelType type, RelExpr expr) const;
  virtual RelExpr adjustGotPcExpr(RelType type, int64_t addend,
                                  const uint8_t *loc) const;
  virtual void relaxGot(uint8_t *loc, const Relocation &rel,
                        uint64_t val) const;
  virtual void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
                              uint64_t val) const;
  virtual void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
                              uint64_t val) const;
  virtual void relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
                              uint64_t val) const;
  virtual void relaxTlsLdToLe(uint8_t *loc, const Relocation &rel,
                              uint64_t val) const;

protected:
  // On FreeBSD x86_64 the first page cannot be mmapped.
  // On Linux this is controlled by vm.mmap_min_addr. At least on some x86_64
  // installs this is set to 65536, so the first 16 pages cannot be used.
  // Given that, the smallest value that can be used here is 0x10000.
  uint64_t defaultImageBase = 0x10000;
};

TargetInfo *getAArch64TargetInfo();
TargetInfo *getAMDGPUTargetInfo();
TargetInfo *getARMTargetInfo();
TargetInfo *getAVRTargetInfo();
TargetInfo *getHexagonTargetInfo();
TargetInfo *getMSP430TargetInfo();
TargetInfo *getPPC64TargetInfo();
TargetInfo *getPPCTargetInfo();
TargetInfo *getRISCVTargetInfo();
TargetInfo *getSPARCV9TargetInfo();
TargetInfo *getX86TargetInfo();
TargetInfo *getX86_64TargetInfo();
template <class ELFT> TargetInfo *getMipsTargetInfo();

struct ErrorPlace {
  InputSectionBase *isec;
  std::string loc;
};

// Returns input section and corresponding source string for the given location.
ErrorPlace getErrorPlace(const uint8_t *loc);

static inline std::string getErrorLocation(const uint8_t *loc) {
  return getErrorPlace(loc).loc;
}

void writePPC32GlinkSection(uint8_t *buf, size_t numEntries);

bool tryRelaxPPC64TocIndirection(const Relocation &rel, uint8_t *bufLoc);
unsigned getPPCDFormOp(unsigned secondaryOp);

// In the PowerPC64 ELFv2 ABI, a function can have two entry points. The first
// is the global entry point (GEP), which is typically used to initialize the
// TOC pointer in general purpose register 2. The second is the local entry
// point (LEP), which bypasses the TOC pointer initialization code. The
// offset between the GEP and the LEP is encoded in a function's st_other
// flags. This function returns the offset (in bytes) from the global entry
// point to the local entry point.
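// (With the common two-instruction TOC setup sequence, the encoded st_other
// value corresponds to an 8-byte offset.)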
unsigned getPPC64GlobalEntryToLocalEntryOffset(uint8_t stOther);

// Returns true if a relocation is a small code model relocation that accesses
// the .toc section.
bool isPPC64SmallCodeModelTocReloc(RelType type);

// Write a prefixed instruction, which is a 4-byte prefix followed by a 4-byte
// instruction (regardless of endianness). Therefore, the prefix is always in
// lower memory than the instruction.
void writePrefixedInstruction(uint8_t *loc, uint64_t insn);

void addPPC64SaveRestore();
uint64_t getPPC64TocBase();
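// Returns the 4 KiB page address of expr (low 12 bits cleared), as used by
// AArch64 ADRP-style page-relative relocations.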
uint64_t getAArch64Page(uint64_t expr);

extern const TargetInfo *target;
TargetInfo *getTarget();

template <class ELFT> bool isMipsPIC(const Defined *sym);

void reportRangeError(uint8_t *loc, const Relocation &rel, const Twine &v,
                      int64_t min, uint64_t max);
void reportRangeError(uint8_t *loc, int64_t v, int n, const Symbol &sym,
                      const Twine &msg);

// Make sure that V can be represented as an N bit signed integer.
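// For example, with n = 12 the representable range is [-2048, 2047], so a
// value of 2048 triggers a range error.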
inline void checkInt(uint8_t *loc, int64_t v, int n, const Relocation &rel) {
  if (v != llvm::SignExtend64(v, n))
    reportRangeError(loc, rel, Twine(v), llvm::minIntN(n), llvm::maxIntN(n));
}

// Make sure that V can be represented as an N bit unsigned integer.
inline void checkUInt(uint8_t *loc, uint64_t v, int n, const Relocation &rel) {
  if ((v >> n) != 0)
    reportRangeError(loc, rel, Twine(v), 0, llvm::maxUIntN(n));
}

// Make sure that V can be represented as an N bit signed or unsigned integer.
inline void checkIntUInt(uint8_t *loc, uint64_t v, int n,
                         const Relocation &rel) {
  // For the error message we should cast V to a signed integer so that error
  // messages show a small negative value rather than an extremely large one.
  if (v != (uint64_t)llvm::SignExtend64(v, n) && (v >> n) != 0)
    reportRangeError(loc, rel, Twine((int64_t)v), llvm::minIntN(n),
                     llvm::maxUIntN(n));
}

inline void checkAlignment(uint8_t *loc, uint64_t v, int n,
                           const Relocation &rel) {
  if ((v & (n - 1)) != 0)
    error(getErrorLocation(loc) + "improper alignment for relocation " +
          lld::toString(rel.type) + ": 0x" + llvm::utohexstr(v) +
          " is not aligned to " + Twine(n) + " bytes");
}

// Endianness-aware read/write.
inline uint16_t read16(const void *p) {
  return llvm::support::endian::read16(p, config->endianness);
}

inline uint32_t read32(const void *p) {
  return llvm::support::endian::read32(p, config->endianness);
}

inline uint64_t read64(const void *p) {
  return llvm::support::endian::read64(p, config->endianness);
}

inline void write16(void *p, uint16_t v) {
  llvm::support::endian::write16(p, v, config->endianness);
}

inline void write32(void *p, uint32_t v) {
  llvm::support::endian::write32(p, v, config->endianness);
}

inline void write64(void *p, uint64_t v) {
  llvm::support::endian::write64(p, v, config->endianness);
}
} // namespace elf
} // namespace lld

#endif