xref: /llvm-project-15.0.7/lld/ELF/Target.cpp (revision e046bbdd)
1 //===- Target.cpp ---------------------------------------------------------===//
2 //
3 //                             The LLVM Linker
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Machine-specific things, such as applying relocations, creation of
11 // GOT or PLT entries, etc., are handled in this file.
12 //
// Refer to the ELF spec for the single-letter variables, S, A or P, used
// in this file. SA is S+A.
15 //
16 //===----------------------------------------------------------------------===//
17 
18 #include "Target.h"
19 #include "Error.h"
20 #include "OutputSections.h"
21 #include "Symbols.h"
22 
23 #include "llvm/ADT/ArrayRef.h"
24 #include "llvm/Object/ELF.h"
25 #include "llvm/Support/Endian.h"
26 #include "llvm/Support/ELF.h"
27 
28 using namespace llvm;
29 using namespace llvm::object;
30 using namespace llvm::support::endian;
31 using namespace llvm::ELF;
32 
33 namespace lld {
34 namespace elf2 {
35 
36 std::unique_ptr<TargetInfo> Target;
37 
38 template <endianness E> static void add32(void *P, int32_t V) {
39   write32<E>(P, read32<E>(P) + V);
40 }
41 
42 static void add32le(uint8_t *P, int32_t V) { add32<support::little>(P, V); }
43 static void or32le(uint8_t *P, int32_t V) { write32le(P, read32le(P) | V); }
44 
45 template <unsigned N> static void checkInt(int64_t V, uint32_t Type) {
46   if (isInt<N>(V))
47     return;
48   StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
49   error("Relocation " + S + " out of range");
50 }
51 
52 template <unsigned N> static void checkUInt(uint64_t V, uint32_t Type) {
53   if (isUInt<N>(V))
54     return;
55   StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
56   error("Relocation " + S + " out of range");
57 }
58 
59 template <unsigned N> static void checkIntUInt(uint64_t V, uint32_t Type) {
60   if (isInt<N>(V) || isUInt<N>(V))
61     return;
62   StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
63   error("Relocation " + S + " out of range");
64 }
65 
66 template <unsigned N> static void checkAlignment(uint64_t V, uint32_t Type) {
67   if ((V & (N - 1)) == 0)
68     return;
69   StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
70   error("Improper alignment for relocation " + S);
71 }
72 
73 template <class ELFT> bool isGnuIFunc(const SymbolBody &S) {
74   if (auto *SS = dyn_cast<DefinedElf<ELFT>>(&S))
75     return SS->Sym.getType() == STT_GNU_IFUNC;
76   return false;
77 }
78 
79 template bool isGnuIFunc<ELF32LE>(const SymbolBody &S);
80 template bool isGnuIFunc<ELF32BE>(const SymbolBody &S);
81 template bool isGnuIFunc<ELF64LE>(const SymbolBody &S);
82 template bool isGnuIFunc<ELF64BE>(const SymbolBody &S);
83 
namespace {
// i386 target. Provides x86 GOT/PLT generation and relocation handling;
// the private relocateTls* helpers implement the IA-32 TLS relaxations
// (GD->IE, GD->LE, LD->LE, IE->LE).
class X86TargetInfo final : public TargetInfo {
public:
  X86TargetInfo();
  void writeGotPltHeaderEntries(uint8_t *Buf) const override;
  unsigned getDynReloc(unsigned Type) const override;
  unsigned getTlsGotReloc(unsigned Type) const override;
  bool isTlsDynReloc(unsigned Type, const SymbolBody &S) const override;
  void writeGotPltEntry(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZeroEntry(uint8_t *Buf, uint64_t GotEntryAddr,
                         uint64_t PltEntryAddr) const override;
  void writePltEntry(uint8_t *Buf, uint64_t GotAddr, uint64_t GotEntryAddr,
                     uint64_t PltEntryAddr, int32_t Index,
                     unsigned RelOff) const override;
  bool needsCopyRel(uint32_t Type, const SymbolBody &S) const override;
  bool relocNeedsDynRelative(unsigned Type) const override;
  bool relocNeedsGot(uint32_t Type, const SymbolBody &S) const override;
  bool relocNeedsPlt(uint32_t Type, const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
  bool isTlsOptimized(unsigned Type, const SymbolBody *S) const override;
  unsigned relocateTlsOptimize(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                               uint64_t P, uint64_t SA,
                               const SymbolBody *S) const override;
  bool isGotRelative(uint32_t Type) const override;

private:
  // TLS relaxation rewriters: each patches the instruction bytes around
  // Loc in place.
  void relocateTlsLdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsGdToIe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsGdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsIeToLe(unsigned Type, uint8_t *Loc, uint8_t *BufEnd,
                         uint64_t P, uint64_t SA) const;
};

// x86-64 target, with its own set of TLS relaxations (GD->IE, GD->LE,
// LD->LE, IE->LE).
class X86_64TargetInfo final : public TargetInfo {
public:
  X86_64TargetInfo();
  bool isTlsDynReloc(unsigned Type, const SymbolBody &S) const override;
  void writeGotPltHeaderEntries(uint8_t *Buf) const override;
  void writeGotPltEntry(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZeroEntry(uint8_t *Buf, uint64_t GotEntryAddr,
                         uint64_t PltEntryAddr) const override;
  void writePltEntry(uint8_t *Buf, uint64_t GotAddr, uint64_t GotEntryAddr,
                     uint64_t PltEntryAddr, int32_t Index,
                     unsigned RelOff) const override;
  bool needsCopyRel(uint32_t Type, const SymbolBody &S) const override;
  bool relocNeedsGot(uint32_t Type, const SymbolBody &S) const override;
  bool relocNeedsPlt(uint32_t Type, const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
  bool isRelRelative(uint32_t Type) const override;
  bool isTlsOptimized(unsigned Type, const SymbolBody *S) const override;
  bool isSizeReloc(uint32_t Type) const override;
  unsigned relocateTlsOptimize(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                               uint64_t P, uint64_t SA,
                               const SymbolBody *S) const override;

private:
  void relocateTlsLdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsGdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsGdToIe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsIeToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
};

// 32-bit PowerPC target.
class PPCTargetInfo final : public TargetInfo {
public:
  PPCTargetInfo();
  void writeGotPltEntry(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZeroEntry(uint8_t *Buf, uint64_t GotEntryAddr,
                         uint64_t PltEntryAddr) const override;
  void writePltEntry(uint8_t *Buf, uint64_t GotAddr, uint64_t GotEntryAddr,
                     uint64_t PltEntryAddr, int32_t Index,
                     unsigned RelOff) const override;
  bool relocNeedsGot(uint32_t Type, const SymbolBody &S) const override;
  bool relocNeedsPlt(uint32_t Type, const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
  bool isRelRelative(uint32_t Type) const override;
};

// 64-bit PowerPC target.
class PPC64TargetInfo final : public TargetInfo {
public:
  PPC64TargetInfo();
  void writeGotPltEntry(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZeroEntry(uint8_t *Buf, uint64_t GotEntryAddr,
                         uint64_t PltEntryAddr) const override;
  void writePltEntry(uint8_t *Buf, uint64_t GotAddr, uint64_t GotEntryAddr,
                     uint64_t PltEntryAddr, int32_t Index,
                     unsigned RelOff) const override;
  bool relocNeedsGot(uint32_t Type, const SymbolBody &S) const override;
  bool relocNeedsPlt(uint32_t Type, const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
  bool isRelRelative(uint32_t Type) const override;
};

// AArch64 (64-bit ARM) target.
class AArch64TargetInfo final : public TargetInfo {
public:
  AArch64TargetInfo();
  unsigned getDynReloc(unsigned Type) const override;
  void writeGotPltEntry(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZeroEntry(uint8_t *Buf, uint64_t GotEntryAddr,
                         uint64_t PltEntryAddr) const override;
  void writePltEntry(uint8_t *Buf, uint64_t GotAddr, uint64_t GotEntryAddr,
                     uint64_t PltEntryAddr, int32_t Index,
                     unsigned RelOff) const override;
  unsigned getTlsGotReloc(unsigned Type = -1) const override;
  bool isTlsDynReloc(unsigned Type, const SymbolBody &S) const override;
  bool needsCopyRel(uint32_t Type, const SymbolBody &S) const override;
  bool relocNeedsGot(uint32_t Type, const SymbolBody &S) const override;
  bool relocNeedsPlt(uint32_t Type, const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
};

// AMDGPU target.
class AMDGPUTargetInfo final : public TargetInfo {
public:
  AMDGPUTargetInfo();
  void writeGotPltEntry(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZeroEntry(uint8_t *Buf, uint64_t GotEntryAddr,
                         uint64_t PltEntryAddr) const override;
  void writePltEntry(uint8_t *Buf, uint64_t GotAddr, uint64_t GotEntryAddr,
                     uint64_t PltEntryAddr, int32_t Index,
                     unsigned RelOff) const override;
  bool relocNeedsGot(uint32_t Type, const SymbolBody &S) const override;
  bool relocNeedsPlt(uint32_t Type, const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
};

// MIPS target, templated over ELFT so that both endiannesses share one
// implementation (see createTarget).
template <class ELFT> class MipsTargetInfo final : public TargetInfo {
public:
  MipsTargetInfo();
  unsigned getDynReloc(unsigned Type) const override;
  void writeGotHeaderEntries(uint8_t *Buf) const override;
  void writeGotPltEntry(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZeroEntry(uint8_t *Buf, uint64_t GotEntryAddr,
                         uint64_t PltEntryAddr) const override;
  void writePltEntry(uint8_t *Buf, uint64_t GotAddr, uint64_t GotEntryAddr,
                     uint64_t PltEntryAddr, int32_t Index,
                     unsigned RelOff) const override;
  bool relocNeedsGot(uint32_t Type, const SymbolBody &S) const override;
  bool relocNeedsPlt(uint32_t Type, const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
  bool isHintReloc(uint32_t Type) const override;
  bool isRelRelative(uint32_t Type) const override;
};
} // anonymous namespace
247 
248 TargetInfo *createTarget() {
249   switch (Config->EMachine) {
250   case EM_386:
251     return new X86TargetInfo();
252   case EM_AARCH64:
253     return new AArch64TargetInfo();
254   case EM_AMDGPU:
255     return new AMDGPUTargetInfo();
256   case EM_MIPS:
257     switch (Config->EKind) {
258     case ELF32LEKind:
259       return new MipsTargetInfo<ELF32LE>();
260     case ELF32BEKind:
261       return new MipsTargetInfo<ELF32BE>();
262     default:
263       error("Unsupported MIPS target");
264     }
265   case EM_PPC:
266     return new PPCTargetInfo();
267   case EM_PPC64:
268     return new PPC64TargetInfo();
269   case EM_X86_64:
270     return new X86_64TargetInfo();
271   }
272   error("Unknown target machine");
273 }
274 
TargetInfo::~TargetInfo() {}

// Default: no TLS relaxation applies. Targets that implement TLS
// optimizations (x86, x86-64 below) override this.
bool TargetInfo::isTlsOptimized(unsigned Type, const SymbolBody *S) const {
  return false;
}

// Image base: shared objects are linked at address 0, executables at
// VAStart.
uint64_t TargetInfo::getVAStart() const { return Config->Shared ? 0 : VAStart; }

// Default: no relocation type requires a copy relocation.
bool TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const {
  return false;
}

// Default: no relocation is computed relative to the GOT base without
// needing a GOT entry (see X86's R_386_GOTOFF override).
bool TargetInfo::isGotRelative(uint32_t Type) const { return false; }

// Default: no relocation type is a hint (MIPS overrides this).
bool TargetInfo::isHintReloc(uint32_t Type) const { return false; }

// Default: all relocations are treated as relative; targets override to
// narrow this down to specific types. (NOTE(review): exact meaning is
// defined by the callers of isRelRelative — confirm against them.)
bool TargetInfo::isRelRelative(uint32_t Type) const { return true; }

// Default: no SIZE-style relocations (x86-64 overrides).
bool TargetInfo::isSizeReloc(uint32_t Type) const { return false; }

// Default TLS relaxation hook: rewrites nothing and tells the caller to
// skip zero following relocations.
unsigned TargetInfo::relocateTlsOptimize(uint8_t *Loc, uint8_t *BufEnd,
                                         uint32_t Type, uint64_t P, uint64_t SA,
                                         const SymbolBody *S) const {
  return 0;
}

// Defaults: targets without GOT/GOT.PLT header words write nothing.
void TargetInfo::writeGotHeaderEntries(uint8_t *Buf) const {}

void TargetInfo::writeGotPltHeaderEntries(uint8_t *Buf) const {}
304 
// i386 target configuration: the relocation type constants used when
// emitting dynamic relocations, plus PLT geometry (16-byte header and
// entries) with lazy binding enabled.
X86TargetInfo::X86TargetInfo() {
  CopyReloc = R_386_COPY;
  PCRelReloc = R_386_PC32;
  GotReloc = R_386_GLOB_DAT;
  PltReloc = R_386_JUMP_SLOT;
  IRelativeReloc = R_386_IRELATIVE;
  RelativeReloc = R_386_RELATIVE;
  TlsGotReloc = R_386_TLS_TPOFF;
  TlsGlobalDynamicReloc = R_386_TLS_GD;
  TlsLocalDynamicReloc = R_386_TLS_LDM;
  TlsModuleIndexReloc = R_386_TLS_DTPMOD32;
  TlsOffsetReloc = R_386_TLS_DTPOFF32;
  LazyRelocations = true;
  PltEntrySize = 16;
  PltZeroEntrySize = 16;
}
321 
// The first .got.plt word is set to the address of the dynamic section.
void X86TargetInfo::writeGotPltHeaderEntries(uint8_t *Buf) const {
  write32le(Buf, Out<ELF32LE>::Dynamic->getVA());
}
325 
// Fill a .got.plt slot for a lazily bound symbol: it initially points
// 6 bytes into the corresponding PLT entry, past its first (6-byte)
// instruction, so the first call falls into the lazy-binding stub.
void X86TargetInfo::writeGotPltEntry(uint8_t *Buf, uint64_t Plt) const {
  // Skip the first 6-byte instruction of the PLT entry.
  write32le(Buf, Plt + 6);
}
330 
331 unsigned X86TargetInfo::getDynReloc(unsigned Type) const {
332   if (Type == R_386_TLS_LE)
333     return R_386_TLS_TPOFF;
334   if (Type == R_386_TLS_LE_32)
335     return R_386_TLS_TPOFF32;
336   return Type;
337 }
338 
339 unsigned X86TargetInfo::getTlsGotReloc(unsigned Type) const {
340   if (Type == R_386_TLS_IE)
341     return Type;
342   return TlsGotReloc;
343 }
344 
345 bool X86TargetInfo::isTlsDynReloc(unsigned Type, const SymbolBody &S) const {
346   if (Type == R_386_TLS_LE || Type == R_386_TLS_LE_32 ||
347       Type == R_386_TLS_GOTIE)
348     return Config->Shared;
349   if (Type == R_386_TLS_IE)
350     return canBePreempted(&S, true);
351   return Type == R_386_TLS_GD;
352 }
353 
// Write PLT[0], which pushes the second GOT word and jumps through the
// third to the lazy resolver.
void X86TargetInfo::writePltZeroEntry(uint8_t *Buf, uint64_t GotEntryAddr,
                                      uint64_t PltEntryAddr) const {
  // Executable files and shared object files have
  // separate procedure linkage tables: a DSO addresses the GOT
  // %ebx-relative (no absolute addresses needed), an executable uses
  // absolute GOT addresses patched in below.
  if (Config->Shared) {
    const uint8_t V[] = {
        0xff, 0xb3, 0x04, 0x00, 0x00, 0x00, // pushl 4(%ebx)
        0xff, 0xa3, 0x08, 0x00, 0x00, 0x00, // jmp *8(%ebx)
        0x90, 0x90, 0x90, 0x90              // nop;nop;nop;nop
    };
    memcpy(Buf, V, sizeof(V));
    return;
  }

  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushl (GOT+4)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *(GOT+8)
      0x90, 0x90, 0x90, 0x90              // nop;nop;nop;nop
  };
  memcpy(Buf, PltData, sizeof(PltData));
  write32le(Buf + 2, GotEntryAddr + 4); // GOT+4
  write32le(Buf + 8, GotEntryAddr + 8); // GOT+8
}
377 
// Write one 16-byte PLT entry: an indirect jump through the symbol's
// GOT slot, followed by the lazy-binding stub (push the relocation
// offset, then jump back to PLT[0]).
void X86TargetInfo::writePltEntry(uint8_t *Buf, uint64_t GotAddr,
                                  uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                                  int32_t Index, unsigned RelOff) const {
  const uint8_t Inst[] = {
      0xff, 0x00, 0x00, 0x00, 0x00, 0x00, // jmp *foo_in_GOT|*foo@GOT(%ebx)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushl $reloc_offset
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmp .PLT0@PC
  };
  memcpy(Buf, Inst, sizeof(Inst));
  // jmp *foo@GOT(%ebx) or jmp *foo_in_GOT
  Buf[1] = Config->Shared ? 0xa3 : 0x25;
  write32le(Buf + 2, Config->Shared ? (GotEntryAddr - GotAddr) : GotEntryAddr);
  write32le(Buf + 7, RelOff);
  // rel32 displacement back to PLT[0], measured from the end of this
  // entry: -(PltZeroEntrySize + Index * PltEntrySize + 16).
  write32le(Buf + 12, -Index * PltEntrySize - PltZeroEntrySize - 16);
}
393 
394 bool X86TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const {
395   if (Type == R_386_32 || Type == R_386_16 || Type == R_386_8)
396     if (auto *SS = dyn_cast<SharedSymbol<ELF32LE>>(&S))
397       return SS->Sym.getType() == STT_OBJECT;
398   return false;
399 }
400 
// Decide whether a GOT entry must be created for S when referenced by a
// relocation of the given type.
bool X86TargetInfo::relocNeedsGot(uint32_t Type, const SymbolBody &S) const {
  if (S.isTls() && Type == R_386_TLS_GD)
    // NOTE(review): this goes through the global Target while the check
    // below calls isTlsOptimized directly; both should resolve to this
    // object — confirm this is intentional.
    return Target->isTlsOptimized(Type, &S) && canBePreempted(&S, true);
  if (Type == R_386_TLS_GOTIE || Type == R_386_TLS_IE)
    // IE-style accesses need a GOT slot unless they will be relaxed away.
    return !isTlsOptimized(Type, &S);
  return Type == R_386_GOT32 || relocNeedsPlt(Type, S);
}
408 
409 bool X86TargetInfo::relocNeedsPlt(uint32_t Type, const SymbolBody &S) const {
410   return isGnuIFunc<ELF32LE>(S) ||
411          (Type == R_386_PLT32 && canBePreempted(&S, true)) ||
412          (Type == R_386_PC32 && S.isShared());
413 }
414 
bool X86TargetInfo::isGotRelative(uint32_t Type) const {
  // R_386_GOTOFF does not require a GOT entry for the symbol itself,
  // but its value is computed relative to the GOT base, so the GOT must
  // exist. Returning true here requests its creation.
  return Type == R_386_GOTOFF;
}
421 
// Apply one i386 relocation at Loc. P is the place being relocated and
// SA is S+A (symbol value plus addend); ZA and PairedLoc are unused on
// this target.
void X86TargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                                uint64_t P, uint64_t SA, uint64_t ZA,
                                uint8_t *PairedLoc) const {
  switch (Type) {
  case R_386_32:
    add32le(Loc, SA);
    break;
  case R_386_GOT32: {
    // Offset relative to the end of the GOT (base + entry count * 4).
    uint64_t V = SA - Out<ELF32LE>::Got->getVA() -
                 Out<ELF32LE>::Got->getNumEntries() * 4;
    checkInt<32>(V, Type);
    add32le(Loc, V);
    break;
  }
  case R_386_GOTOFF:
    // Offset from the GOT base.
    add32le(Loc, SA - Out<ELF32LE>::Got->getVA());
    break;
  case R_386_GOTPC:
    // GOT base relative to the place, plus the addend.
    add32le(Loc, SA + Out<ELF32LE>::Got->getVA() - P);
    break;
  case R_386_PC32:
  case R_386_PLT32:
    add32le(Loc, SA - P);
    break;
  case R_386_TLS_GD:
  case R_386_TLS_LDM:
  case R_386_TLS_TPOFF: {
    // Like R_386_GOT32, but the value is stored, not added.
    uint64_t V = SA - Out<ELF32LE>::Got->getVA() -
                 Out<ELF32LE>::Got->getNumEntries() * 4;
    checkInt<32>(V, Type);
    write32le(Loc, V);
    break;
  }
  case R_386_TLS_IE:
  case R_386_TLS_LDO_32:
    write32le(Loc, SA);
    break;
  case R_386_TLS_LE:
    // Negative offset from the end of the TLS block (p_memsz).
    write32le(Loc, SA - Out<ELF32LE>::TlsPhdr->p_memsz);
    break;
  case R_386_TLS_LE_32:
    // Same offset with the opposite sign.
    write32le(Loc, Out<ELF32LE>::TlsPhdr->p_memsz - SA);
    break;
  default:
    error("unrecognized reloc " + Twine(Type));
  }
}
469 
470 bool X86TargetInfo::isTlsOptimized(unsigned Type, const SymbolBody *S) const {
471   if (Config->Shared || (S && !S->isTls()))
472     return false;
473   return Type == R_386_TLS_LDO_32 || Type == R_386_TLS_LDM ||
474          Type == R_386_TLS_GD ||
475          (Type == R_386_TLS_IE && !canBePreempted(S, true)) ||
476          (Type == R_386_TLS_GOTIE && !canBePreempted(S, true));
477 }
478 
479 bool X86TargetInfo::relocNeedsDynRelative(unsigned Type) const {
480   return Config->Shared && Type == R_386_TLS_IE;
481 }
482 
// Relax the TLS access at Loc according to Type. Returns the number of
// following relocations the caller should skip (1 when the relaxation
// removed the adjacent __tls_get_addr call).
unsigned X86TargetInfo::relocateTlsOptimize(uint8_t *Loc, uint8_t *BufEnd,
                                            uint32_t Type, uint64_t P,
                                            uint64_t SA,
                                            const SymbolBody *S) const {
  switch (Type) {
  case R_386_TLS_GD:
    // GD relaxes to IE for preemptible symbols, to LE otherwise.
    if (canBePreempted(S, true))
      relocateTlsGdToIe(Loc, BufEnd, P, SA);
    else
      relocateTlsGdToLe(Loc, BufEnd, P, SA);
    // The next relocation should be against __tls_get_addr, so skip it
    return 1;
  case R_386_TLS_GOTIE:
  case R_386_TLS_IE:
    relocateTlsIeToLe(Type, Loc, BufEnd, P, SA);
    return 0;
  case R_386_TLS_LDM:
    relocateTlsLdToLe(Loc, BufEnd, P, SA);
    // The next relocation should be against __tls_get_addr, so skip it
    return 1;
  case R_386_TLS_LDO_32:
    // After LD->LE the LDO offset becomes a plain LE offset.
    relocateOne(Loc, BufEnd, R_386_TLS_LE, P, SA);
    return 0;
  }
  llvm_unreachable("Unknown TLS optimization");
}
509 
// "Ulrich Drepper, ELF Handling For Thread-Local Storage" (5.1
// IA-32 Linker Optimizations, http://www.akkadia.org/drepper/tls.pdf) shows
// how GD can be optimized to IE:
//   leal x@tlsgd(, %ebx, 1),
//   call __tls_get_addr@plt
// Is converted to:
//   movl %gs:0, %eax
//   addl x@gotntpoff(%ebx), %eax
void X86TargetInfo::relocateTlsGdToIe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                                      uint64_t SA) const {
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x03, 0x83, 0x00, 0x00, 0x00, 0x00  // addl 0(%ebx), %eax
  };
  // The relocation points 3 bytes into the leal, so the rewritten
  // sequence starts at Loc - 3.
  memcpy(Loc - 3, Inst, sizeof(Inst));
  // Patch the addl displacement (at Loc + 5) with the @gotntpoff value:
  // SA relative to the end of the GOT.
  relocateOne(Loc + 5, BufEnd, R_386_32, P,
              SA - Out<ELF32LE>::Got->getVA() -
                  Out<ELF32LE>::Got->getNumEntries() * 4);
}
529 
// GD can be optimized to LE:
//   leal x@tlsgd(, %ebx, 1),
//   call __tls_get_addr@plt
// Can be converted to:
//   movl %gs:0,%eax
//   addl $x@ntpoff,%eax
// But gold emits subl $foo@tpoff,%eax instead of addl.
// These instructions are completely equal in behavior.
// This method generates subl to be consistent with gold.
void X86TargetInfo::relocateTlsGdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                                      uint64_t SA) const {
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x81, 0xe8, 0x00, 0x00, 0x00, 0x00  // subl $imm32, %eax
  };
  // The relocation points 3 bytes into the leal; overwrite from Loc - 3.
  memcpy(Loc - 3, Inst, sizeof(Inst));
  // The subtracted immediate (at Loc + 5) is the positive offset from
  // the end of the TLS block.
  relocateOne(Loc + 5, BufEnd, R_386_32, P,
              Out<ELF32LE>::TlsPhdr->p_memsz - SA);
}
549 
// LD can be optimized to LE:
//   leal foo(%reg),%eax
//   call ___tls_get_addr
// Is converted to:
//   movl %gs:0,%eax
//   nop
//   leal 0(%esi,1),%esi
void X86TargetInfo::relocateTlsLdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                                      uint64_t SA) const {
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0,%eax
      0x90,                               // nop
      0x8d, 0x74, 0x26, 0x00              // leal 0(%esi,1),%esi
  };
  // The relocation points 2 bytes into the leal; overwrite from Loc - 2.
  // P and SA are not needed: the replacement sequence is constant.
  memcpy(Loc - 2, Inst, sizeof(Inst));
}
566 
// In some conditions, relocations can be optimized to avoid using GOT.
// This function does that for Initial Exec to Local Exec case.
// Read "ELF Handling For Thread-Local Storage, 5.1
// IA-32 Linker Optimizations" (http://www.akkadia.org/drepper/tls.pdf)
// by Ulrich Drepper for details.
void X86TargetInfo::relocateTlsIeToLe(unsigned Type, uint8_t *Loc,
                                      uint8_t *BufEnd, uint64_t P,
                                      uint64_t SA) const {
  // Ulrich's document section 6.2 says that @gotntpoff can
  // be used with MOVL or ADDL instructions.
  // @indntpoff is similar to @gotntpoff, but for use in
  // position dependent code.
  // The relocation points at the 4-byte displacement/immediate; the
  // opcode byte is 2 bytes before it and the ModRM byte 1 byte before.
  uint8_t *Inst = Loc - 2;
  uint8_t *Op = Loc - 1;
  uint8_t Reg = (Loc[-1] >> 3) & 7;
  bool IsMov = *Inst == 0x8b;
  if (Type == R_386_TLS_IE) {
    // For R_386_TLS_IE relocation we perform the next transformations:
    // MOVL foo@INDNTPOFF,%EAX is transformed to MOVL $foo,%EAX
    // MOVL foo@INDNTPOFF,%REG is transformed to MOVL $foo,%REG
    // ADDL foo@INDNTPOFF,%REG is transformed to ADDL $foo,%REG
    // First one is special because when EAX is used the sequence is 5 bytes
    // long, otherwise it is 6 bytes.
    if (*Op == 0xa1) {
      *Op = 0xb8;
    } else {
      *Inst = IsMov ? 0xc7 : 0x81;
      *Op = 0xc0 | ((*Op >> 3) & 7);
    }
  } else {
    // R_386_TLS_GOTIE relocation can be optimized to
    // R_386_TLS_LE so that it does not use GOT.
    // "MOVL foo@GOTTPOFF(%RIP), %REG" is transformed to "MOVL $foo, %REG".
    // "ADDL foo@GOTNTPOFF(%RIP), %REG" is transformed to "LEAL foo(%REG), %REG"
    // Note: gold converts to ADDL instead of LEAL.
    *Inst = IsMov ? 0xc7 : 0x8d;
    if (IsMov)
      *Op = 0xc0 | ((*Op >> 3) & 7);
    else
      *Op = 0x80 | Reg | (Reg << 3);
  }
  // Finally write the local-exec offset into the rewritten instruction.
  relocateOne(Loc, BufEnd, R_386_TLS_LE, P, SA);
}
610 
// x86-64 target configuration: relocation type constants for dynamic
// relocations, plus PLT geometry (16-byte header and entries) with lazy
// binding enabled.
X86_64TargetInfo::X86_64TargetInfo() {
  CopyReloc = R_X86_64_COPY;
  PCRelReloc = R_X86_64_PC32;
  GotReloc = R_X86_64_GLOB_DAT;
  PltReloc = R_X86_64_JUMP_SLOT;
  RelativeReloc = R_X86_64_RELATIVE;
  IRelativeReloc = R_X86_64_IRELATIVE;
  TlsGotReloc = R_X86_64_TPOFF64;
  TlsLocalDynamicReloc = R_X86_64_TLSLD;
  TlsGlobalDynamicReloc = R_X86_64_TLSGD;
  TlsModuleIndexReloc = R_X86_64_DTPMOD64;
  TlsOffsetReloc = R_X86_64_DTPOFF64;
  LazyRelocations = true;
  PltEntrySize = 16;
  PltZeroEntrySize = 16;
}
627 
// The first .got.plt word is set to the address of the dynamic section.
void X86_64TargetInfo::writeGotPltHeaderEntries(uint8_t *Buf) const {
  write64le(Buf, Out<ELF64LE>::Dynamic->getVA());
}
631 
// Fill a .got.plt slot for a lazily bound symbol: it initially points
// 6 bytes into the PLT entry, past the 6-byte jmpq, so the first call
// falls into the lazy-binding stub.
// NOTE(review): only the low 32 bits of the 8-byte slot are written —
// verify this is intended (slot presumed zero-initialized).
void X86_64TargetInfo::writeGotPltEntry(uint8_t *Buf, uint64_t Plt) const {
  // Skip 6 bytes of "jmpq *got(%rip)"
  write32le(Buf, Plt + 6);
}
636 
// Write PLT[0]: push the second .got.plt word and jump through the
// third to the lazy resolver, both RIP-relative.
void X86_64TargetInfo::writePltZeroEntry(uint8_t *Buf, uint64_t GotEntryAddr,
                                         uint64_t PltEntryAddr) const {
  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushq GOT+8(%rip)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *GOT+16(%rip)
      0x0f, 0x1f, 0x40, 0x00              // nopl 0x0(rax)
  };
  memcpy(Buf, PltData, sizeof(PltData));
  // RIP-relative displacements: target minus the end of the 6-byte
  // instruction, e.g. (GOT+8) - (Plt+6) = GOT - Plt + 2.
  write32le(Buf + 2, GotEntryAddr - PltEntryAddr + 2); // GOT+8
  write32le(Buf + 8, GotEntryAddr - PltEntryAddr + 4); // GOT+16
}
648 
// Write one 16-byte PLT entry: a RIP-relative indirect jump through the
// symbol's .got.plt slot, followed by the lazy-binding stub (push the
// relocation index, jump back to PLT[0]). GotAddr is unused here.
void X86_64TargetInfo::writePltEntry(uint8_t *Buf, uint64_t GotAddr,
                                     uint64_t GotEntryAddr,
                                     uint64_t PltEntryAddr, int32_t Index,
                                     unsigned RelOff) const {
  const uint8_t Inst[] = {
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmpq *got(%rip)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushq <relocation index>
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmpq plt[0]
  };
  memcpy(Buf, Inst, sizeof(Inst));

  // RIP-relative displacement to the GOT slot, measured from the end of
  // the 6-byte jmpq.
  write32le(Buf + 2, GotEntryAddr - PltEntryAddr - 6);
  write32le(Buf + 7, Index);
  // rel32 back to PLT[0], measured from the end of this entry.
  write32le(Buf + 12, -Index * PltEntrySize - PltZeroEntrySize - 16);
}
664 
665 bool X86_64TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const {
666   if (Type == R_X86_64_32S || Type == R_X86_64_32 || Type == R_X86_64_PC32 ||
667       Type == R_X86_64_64)
668     if (auto *SS = dyn_cast<SharedSymbol<ELF64LE>>(&S))
669       return SS->Sym.getType() == STT_OBJECT;
670   return false;
671 }
672 
// Decide whether a GOT entry must be created for S when referenced by a
// relocation of the given type.
bool X86_64TargetInfo::relocNeedsGot(uint32_t Type, const SymbolBody &S) const {
  if (Type == R_X86_64_TLSGD)
    // NOTE(review): this goes through the global Target while the check
    // below calls isTlsOptimized directly; both should resolve to this
    // object — confirm this is intentional.
    return Target->isTlsOptimized(Type, &S) && canBePreempted(&S, true);
  if (Type == R_X86_64_GOTTPOFF)
    // Initial-exec accesses need a GOT slot unless they are relaxed away.
    return !isTlsOptimized(Type, &S);
  return Type == R_X86_64_GOTPCREL || relocNeedsPlt(Type, S);
}
680 
681 bool X86_64TargetInfo::isTlsDynReloc(unsigned Type, const SymbolBody &S) const {
682   return Type == R_X86_64_GOTTPOFF || Type == R_X86_64_TLSGD;
683 }
684 
// Decide whether a PLT entry must be created for S when referenced by a
// relocation of the given type.
bool X86_64TargetInfo::relocNeedsPlt(uint32_t Type, const SymbolBody &S) const {
  if (needsCopyRel(Type, S))
    return false;
  if (isGnuIFunc<ELF64LE>(S))
    return true;

  switch (Type) {
  default:
    return false;
  case R_X86_64_32:
  case R_X86_64_64:
  case R_X86_64_PC32:
    // This relocation is defined to have a value of (S + A - P).
    // The problems start when a non PIC program calls a function in a shared
    // library.
    // In an ideal world, we could just report an error saying the relocation
    // can overflow at runtime.
    // In the real world with glibc, crt1.o has a R_X86_64_PC32 pointing to
    // libc.so.
    //
    // The general idea on how to handle such cases is to create a PLT entry
    // and use that as the function value.
    //
    // For the static linking part, we just return true and everything else
    // will use the PLT entry as the address.
    //
    // The remaining (unimplemented) problem is making sure pointer equality
    // still works. We need the help of the dynamic linker for that. We
    // let it know that we have a direct reference to a so symbol by creating
    // an undefined symbol with a non zero st_value. Seeing that, the
    // dynamic linker resolves the symbol to the value of the symbol we created.
    // This is true even for got entries, so pointer equality is maintained.
    // To avoid an infinite loop, the only entry that points to the
    // real function is a dedicated got entry used by the plt. That is
    // identified by special relocation types (R_X86_64_JUMP_SLOT,
    // R_386_JMP_SLOT, etc).
    return S.isShared();
  case R_X86_64_PLT32:
    return canBePreempted(&S, true);
  }
}
726 
727 bool X86_64TargetInfo::isRelRelative(uint32_t Type) const {
728   switch (Type) {
729   default:
730     return false;
731   case R_X86_64_DTPOFF32:
732   case R_X86_64_DTPOFF64:
733   case R_X86_64_PC8:
734   case R_X86_64_PC16:
735   case R_X86_64_PC32:
736   case R_X86_64_PC64:
737   case R_X86_64_PLT32:
738     return true;
739   }
740 }
741 
742 bool X86_64TargetInfo::isSizeReloc(uint32_t Type) const {
743   return Type == R_X86_64_SIZE32 || Type == R_X86_64_SIZE64;
744 }
745 
746 bool X86_64TargetInfo::isTlsOptimized(unsigned Type,
747                                       const SymbolBody *S) const {
748   if (Config->Shared || (S && !S->isTls()))
749     return false;
750   return Type == R_X86_64_TLSGD || Type == R_X86_64_TLSLD ||
751          Type == R_X86_64_DTPOFF32 ||
752          (Type == R_X86_64_GOTTPOFF && !canBePreempted(S, true));
753 }
754 
// "Ulrich Drepper, ELF Handling For Thread-Local Storage" (5.5
// x86-x64 linker optimizations, http://www.akkadia.org/drepper/tls.pdf) shows
// how LD can be optimized to LE:
//   leaq bar@tlsld(%rip), %rdi
//   callq __tls_get_addr@PLT
//   leaq bar@dtpoff(%rax), %rcx
// Is converted to:
//  .word 0x6666
//  .byte 0x66
//  mov %fs:0,%rax
//  leaq bar@tpoff(%rax), %rcx
void X86_64TargetInfo::relocateTlsLdToLe(uint8_t *Loc, uint8_t *BufEnd,
                                         uint64_t P, uint64_t SA) const {
  const uint8_t Inst[] = {
      0x66, 0x66,                                          //.word 0x6666
      0x66,                                                //.byte 0x66
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
  };
  // The relocation points 3 bytes into the leaq; the constant 12-byte
  // replacement starts at Loc - 3. P and SA are not needed.
  memcpy(Loc - 3, Inst, sizeof(Inst));
}
775 
// "Ulrich Drepper, ELF Handling For Thread-Local Storage" (5.5
// x86-x64 linker optimizations, http://www.akkadia.org/drepper/tls.pdf) shows
// how GD can be optimized to LE:
//  .byte 0x66
//  leaq x@tlsgd(%rip), %rdi
//  .word 0x6666
//  rex64
//  call __tls_get_addr@plt
// Is converted to:
//  mov %fs:0x0,%rax
//  lea x@tpoff,%rax
void X86_64TargetInfo::relocateTlsGdToLe(uint8_t *Loc, uint8_t *BufEnd,
                                         uint64_t P, uint64_t SA) const {
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
      0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00              // lea x@tpoff,%rax
  };
  // Loc points at the 32-bit @tlsgd operand, 4 bytes into the original
  // sequence, so the replacement is written starting at Loc - 4.
  memcpy(Loc - 4, Inst, sizeof(Inst));
  // The lea's immediate (last 4 bytes of the new sequence, at Loc + 8) gets
  // the thread-pointer-relative offset of the symbol.
  relocateOne(Loc + 8, BufEnd, R_X86_64_TPOFF32, P, SA);
}
796 
// "Ulrich Drepper, ELF Handling For Thread-Local Storage" (5.5
// x86-x64 linker optimizations, http://www.akkadia.org/drepper/tls.pdf) shows
// how GD can be optimized to IE:
//  .byte 0x66
//  leaq x@tlsgd(%rip), %rdi
//  .word 0x6666
//  rex64
//  call __tls_get_addr@plt
// Is converted to:
//  mov %fs:0x0,%rax
//  addq x@tpoff,%rax
void X86_64TargetInfo::relocateTlsGdToIe(uint8_t *Loc, uint8_t *BufEnd,
                                         uint64_t P, uint64_t SA) const {
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
      0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00              // addq x@tpoff,%rax
  };
  // As in relocateTlsGdToLe, the rewrite starts 4 bytes before Loc.
  memcpy(Loc - 4, Inst, sizeof(Inst));
  // The addq keeps a rip-relative 32-bit operand (at Loc + 8; its place is
  // P + 12 relative to the original P) — R_X86_64_TPOFF64 here writes a
  // 32-bit PC-relative value, see relocateOne.
  relocateOne(Loc + 8, BufEnd, R_X86_64_TPOFF64, P + 12, SA);
}
817 
// In some conditions, R_X86_64_GOTTPOFF relocation can be optimized to
// R_X86_64_TPOFF32 so that it does not use GOT.
// This function does that. Read "ELF Handling For Thread-Local Storage,
// 5.5 x86-x64 linker optimizations" (http://www.akkadia.org/drepper/tls.pdf)
// by Ulrich Drepper for details.
void X86_64TargetInfo::relocateTlsIeToLe(uint8_t *Loc, uint8_t *BufEnd,
                                         uint64_t P, uint64_t SA) const {
  // Ulrich's document section 6.5 says that @gottpoff(%rip) must be
  // used in MOVQ or ADDQ instructions only.
  // "MOVQ foo@GOTTPOFF(%RIP), %REG" is transformed to "MOVQ $foo, %REG".
  // "ADDQ foo@GOTTPOFF(%RIP), %REG" is transformed to "LEAQ foo(%REG), %REG"
  // (if the register is not RSP/R12) or "ADDQ $foo, %RSP".
  // Opcodes info can be found at http://ref.x86asm.net/coder64.html#x48.
  // Loc points at the 32-bit rip-relative operand, so the REX prefix is at
  // Loc - 3, the opcode at Loc - 2, and the ModRM byte at Loc - 1.
  uint8_t *Prefix = Loc - 3;
  uint8_t *Inst = Loc - 2;
  uint8_t *RegSlot = Loc - 1;
  // For a rip-relative ModRM byte the top bits are fixed, so >> 3 yields the
  // 3-bit reg field — presumably mod=00/rm=101 here; confirm.
  uint8_t Reg = Loc[-1] >> 3;
  bool IsMov = *Inst == 0x8b;
  // Reg == 4 is RSP (or R12 with a REX.B prefix).
  bool RspAdd = !IsMov && Reg == 4;
  // r12 and rsp registers requires special handling.
  // Problem is that for other registers, for example leaq 0xXXXXXXXX(%r11),%r11
  // result out is 7 bytes: 4d 8d 9b XX XX XX XX,
  // but leaq 0xXXXXXXXX(%r12),%r12 is 8 bytes: 4d 8d a4 24 XX XX XX XX.
  // The same true for rsp. So we convert to addq for them, saving 1 byte that
  // we dont have.
  if (RspAdd)
    *Inst = 0x81; // addq $imm32, %reg
  else
    *Inst = IsMov ? 0xc7 : 0x8d; // movq $imm32 / leaq disp32(%reg)
  // Rewrite the REX prefix: the memory operand's extension bit (REX.B)
  // replaces the index-register bit pattern of the original REX.R form.
  if (*Prefix == 0x4c)
    *Prefix = (IsMov || RspAdd) ? 0x49 : 0x4d;
  // New ModRM: register-direct for mov/addq, base+disp32 (reg as both base
  // and destination) for leaq.
  *RegSlot = (IsMov || RspAdd) ? (0xc0 | Reg) : (0x80 | Reg | (Reg << 3));
  // Finally patch the 32-bit field with the thread-pointer-relative offset.
  relocateOne(Loc, BufEnd, R_X86_64_TPOFF32, P, SA);
}
852 
853 // This function applies a TLS relocation with an optimization as described
854 // in the Ulrich's document. As a result of rewriting instructions at the
855 // relocation target, relocations immediately follow the TLS relocation (which
856 // would be applied to rewritten instructions) may have to be skipped.
857 // This function returns a number of relocations that need to be skipped.
858 unsigned X86_64TargetInfo::relocateTlsOptimize(uint8_t *Loc, uint8_t *BufEnd,
859                                                uint32_t Type, uint64_t P,
860                                                uint64_t SA,
861                                                const SymbolBody *S) const {
862   switch (Type) {
863   case R_X86_64_DTPOFF32:
864     relocateOne(Loc, BufEnd, R_X86_64_TPOFF32, P, SA);
865     return 0;
866   case R_X86_64_GOTTPOFF:
867     relocateTlsIeToLe(Loc, BufEnd, P, SA);
868     return 0;
869   case R_X86_64_TLSGD: {
870     if (canBePreempted(S, true))
871       relocateTlsGdToIe(Loc, BufEnd, P, SA);
872     else
873       relocateTlsGdToLe(Loc, BufEnd, P, SA);
874     // The next relocation should be against __tls_get_addr, so skip it
875     return 1;
876   }
877   case R_X86_64_TLSLD:
878     relocateTlsLdToLe(Loc, BufEnd, P, SA);
879     // The next relocation should be against __tls_get_addr, so skip it
880     return 1;
881   }
882   llvm_unreachable("Unknown TLS optimization");
883 }
884 
// Applies one x86-64 relocation of the given Type at Loc.
// P is the place (runtime address of Loc), SA is S+A (symbol value plus
// addend) and ZA is Z+A (symbol size plus addend, used by the SIZE
// relocations). See the file header for this naming convention.
void X86_64TargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                                   uint64_t P, uint64_t SA, uint64_t ZA,
                                   uint8_t *PairedLoc) const {
  switch (Type) {
  case R_X86_64_32:
    // Zero-extended 32-bit absolute: must fit in an unsigned 32-bit field.
    checkUInt<32>(SA, Type);
    write32le(Loc, SA);
    break;
  case R_X86_64_32S:
    // Sign-extended 32-bit absolute: must fit in a signed 32-bit field.
    checkInt<32>(SA, Type);
    write32le(Loc, SA);
    break;
  case R_X86_64_64:
    write64le(Loc, SA);
    break;
  case R_X86_64_DTPOFF32:
    // Offset within the symbol's TLS block (dynamic TLS model).
    write32le(Loc, SA);
    break;
  case R_X86_64_DTPOFF64:
    write64le(Loc, SA);
    break;
  case R_X86_64_GOTPCREL:
  case R_X86_64_PC32:
  case R_X86_64_PLT32:
  case R_X86_64_TLSGD:
  case R_X86_64_TLSLD:
    // All 32-bit PC-relative forms: S + A - P.
    write32le(Loc, SA - P);
    break;
  case R_X86_64_SIZE32:
    write32le(Loc, ZA);
    break;
  case R_X86_64_SIZE64:
    write64le(Loc, ZA);
    break;
  case R_X86_64_TPOFF32: {
    // Thread-pointer-relative offset: the TLS segment sits immediately below
    // %fs on x86-64, hence the subtraction of the segment's memory size.
    uint64_t Val = SA - Out<ELF64LE>::TlsPhdr->p_memsz;
    checkInt<32>(Val, Type);
    write32le(Loc, Val);
    break;
  }
  case R_X86_64_TPOFF64:
    // NOTE(review): despite the "64" this writes a 32-bit PC-relative value;
    // the only in-file caller is relocateTlsGdToIe, which patches a 32-bit
    // rip-relative operand. Confirm before reusing for anything else.
    write32le(Loc, SA - P);
    break;
  default:
    error("unrecognized reloc " + Twine(Type));
  }
}
932 
// Helpers implementing the #lo(value), #hi(value), #ha(value),
// #higher(value), #highera(value), #highest(value) and #highesta(value)
// macros from section 4.5.1 (Relocation Types) of the PPC-elf64abi
// document. The "a" (adjusted) variants add 0x8000 before shifting so that
// pairing with a sign-extended low half-word reconstructs the exact value.
static uint16_t applyPPCLo(uint64_t V) { return V & 0xffff; }
static uint16_t applyPPCHi(uint64_t V) { return (V >> 16) & 0xffff; }
static uint16_t applyPPCHa(uint64_t V) { return applyPPCHi(V + 0x8000); }
static uint16_t applyPPCHigher(uint64_t V) { return (V >> 32) & 0xffff; }
static uint16_t applyPPCHighera(uint64_t V) { return applyPPCHigher(V + 0x8000); }
static uint16_t applyPPCHighest(uint64_t V) { return (V >> 48) & 0xffff; }
static uint16_t applyPPCHighesta(uint64_t V) { return applyPPCHighest(V + 0x8000); }
944 
// The 32-bit PPC port is currently a skeleton: no GOT/PLT support, and only
// a couple of relocation types are handled in relocateOne below.
PPCTargetInfo::PPCTargetInfo() {}
// PLT/GOTPLT writers are intentionally empty — no PLT is emitted for PPC32.
void PPCTargetInfo::writeGotPltEntry(uint8_t *Buf, uint64_t Plt) const {}
void PPCTargetInfo::writePltZeroEntry(uint8_t *Buf, uint64_t GotEntryAddr,
                                        uint64_t PltEntryAddr) const {}
void PPCTargetInfo::writePltEntry(uint8_t *Buf, uint64_t GotAddr,
                                  uint64_t GotEntryAddr,
                                  uint64_t PltEntryAddr, int32_t Index,
                                  unsigned RelOff) const {}
// No relocation currently requires a GOT or PLT entry on PPC32.
bool PPCTargetInfo::relocNeedsGot(uint32_t Type, const SymbolBody &S) const {
  return false;
}
bool PPCTargetInfo::relocNeedsPlt(uint32_t Type, const SymbolBody &S) const {
  return false;
}
bool PPCTargetInfo::isRelRelative(uint32_t Type) const { return false; }
960 
961 void PPCTargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
962                                 uint64_t P, uint64_t SA, uint64_t ZA,
963                                 uint8_t *PairedLoc) const {
964   switch (Type) {
965   case R_PPC_ADDR16_HA:
966     write16be(Loc, applyPPCHa(SA));
967     break;
968   case R_PPC_ADDR16_LO:
969     write16be(Loc, applyPPCLo(SA));
970     break;
971   default:
972     error("unrecognized reloc " + Twine(Type));
973   }
974 }
975 
PPC64TargetInfo::PPC64TargetInfo() {
  PCRelReloc = R_PPC64_REL24;
  GotReloc = R_PPC64_GLOB_DAT;
  RelativeReloc = R_PPC64_RELATIVE;
  // 32 bytes = the eight 4-byte instructions emitted by writePltEntry below.
  PltEntrySize = 32;

  // We need 64K pages (at least under glibc/Linux, the loader won't
  // set different permissions on a finer granularity than that).
  PageSize = 65536;

  // The PPC64 ELF ABI v1 spec, says:
  //
  //   It is normally desirable to put segments with different characteristics
  //   in separate 256 Mbyte portions of the address space, to give the
  //   operating system full paging flexibility in the 64-bit address space.
  //
  // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
  // use 0x10000000 as the starting address.
  VAStart = 0x10000000;
}
996 
997 uint64_t getPPC64TocBase() {
998   // The TOC consists of sections .got, .toc, .tocbss, .plt in that
999   // order. The TOC starts where the first of these sections starts.
1000 
1001   // FIXME: This obviously does not do the right thing when there is no .got
1002   // section, but there is a .toc or .tocbss section.
1003   uint64_t TocVA = Out<ELF64BE>::Got->getVA();
1004   if (!TocVA)
1005     TocVA = Out<ELF64BE>::Plt->getVA();
1006 
1007   // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
1008   // thus permitting a full 64 Kbytes segment. Note that the glibc startup
1009   // code (crt1.o) assumes that you can get from the TOC base to the
1010   // start of the .toc section with only a single (signed) 16-bit relocation.
1011   return TocVA + 0x8000;
1012 }
1013 
// Intentionally empty: the PPC64 PLT scheme below does not emit any content
// for GOTPLT slots or a PLT header entry.
void PPC64TargetInfo::writeGotPltEntry(uint8_t *Buf, uint64_t Plt) const {}
void PPC64TargetInfo::writePltZeroEntry(uint8_t *Buf, uint64_t GotEntryAddr,
                                        uint64_t PltEntryAddr) const {}
// Writes a 32-byte PPC64 ABI v1 PLT call stub: it saves the caller's TOC
// pointer, loads the callee's function-descriptor fields (entry point, TOC
// and environment pointers) via a TOC-relative GOT entry, and branches
// through CTR.
void PPC64TargetInfo::writePltEntry(uint8_t *Buf, uint64_t GotAddr,
                                    uint64_t GotEntryAddr,
                                    uint64_t PltEntryAddr, int32_t Index,
                                    unsigned RelOff) const {
  // TOC-relative offset of the GOT entry; split into @ha/@l halves below.
  uint64_t Off = GotEntryAddr - getPPC64TocBase();

  // FIXME: What we should do, in theory, is get the offset of the function
  // descriptor in the .opd section, and use that as the offset from %r2 (the
  // TOC-base pointer). Instead, we have the GOT-entry offset, and that will
  // be a pointer to the function descriptor in the .opd section. Using
  // this scheme is simpler, but requires an extra indirection per PLT dispatch.

  write32be(Buf,      0xf8410028);                   // std %r2, 40(%r1)
  write32be(Buf + 4,  0x3d620000 | applyPPCHa(Off)); // addis %r11, %r2, X@ha
  write32be(Buf + 8,  0xe98b0000 | applyPPCLo(Off)); // ld %r12, X@l(%r11)
  write32be(Buf + 12, 0xe96c0000);                   // ld %r11,0(%r12)
  write32be(Buf + 16, 0x7d6903a6);                   // mtctr %r11
  write32be(Buf + 20, 0xe84c0008);                   // ld %r2,8(%r12)
  write32be(Buf + 24, 0xe96c0010);                   // ld %r11,16(%r12)
  write32be(Buf + 28, 0x4e800420);                   // bctr
}
1038 
1039 bool PPC64TargetInfo::relocNeedsGot(uint32_t Type, const SymbolBody &S) const {
1040   if (relocNeedsPlt(Type, S))
1041     return true;
1042 
1043   switch (Type) {
1044   default: return false;
1045   case R_PPC64_GOT16:
1046   case R_PPC64_GOT16_DS:
1047   case R_PPC64_GOT16_HA:
1048   case R_PPC64_GOT16_HI:
1049   case R_PPC64_GOT16_LO:
1050   case R_PPC64_GOT16_LO_DS:
1051     return true;
1052   }
1053 }
1054 
1055 bool PPC64TargetInfo::relocNeedsPlt(uint32_t Type, const SymbolBody &S) const {
1056   // These are function calls that need to be redirected through a PLT stub.
1057   return Type == R_PPC64_REL24 && canBePreempted(&S, false);
1058 }
1059 
1060 bool PPC64TargetInfo::isRelRelative(uint32_t Type) const {
1061   switch (Type) {
1062   default:
1063     return true;
1064   case R_PPC64_ADDR64:
1065   case R_PPC64_TOC:
1066     return false;
1067   }
1068 }
1069 
// Applies one PPC64 relocation. TOC-relative types are first rewritten into
// their ADDR16 equivalents (with the TOC base subtracted from S+A) and then
// handled by the main switch.
void PPC64TargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                                  uint64_t P, uint64_t SA, uint64_t ZA,
                                  uint8_t *PairedLoc) const {
  uint64_t TB = getPPC64TocBase();

  // For a TOC-relative relocation, adjust the addend and proceed in terms of
  // the corresponding ADDR16 relocation type.
  switch (Type) {
  case R_PPC64_TOC16:       Type = R_PPC64_ADDR16;       SA -= TB; break;
  case R_PPC64_TOC16_DS:    Type = R_PPC64_ADDR16_DS;    SA -= TB; break;
  case R_PPC64_TOC16_HA:    Type = R_PPC64_ADDR16_HA;    SA -= TB; break;
  case R_PPC64_TOC16_HI:    Type = R_PPC64_ADDR16_HI;    SA -= TB; break;
  case R_PPC64_TOC16_LO:    Type = R_PPC64_ADDR16_LO;    SA -= TB; break;
  case R_PPC64_TOC16_LO_DS: Type = R_PPC64_ADDR16_LO_DS; SA -= TB; break;
  default: break;
  }

  switch (Type) {
  case R_PPC64_ADDR14: {
    checkAlignment<4>(SA, Type);
    // Preserve the AA/LK bits in the branch instruction
    uint8_t AALK = Loc[3];
    write16be(Loc + 2, (AALK & 3) | (SA & 0xfffc));
    break;
  }
  case R_PPC64_ADDR16:
    checkInt<16>(SA, Type);
    write16be(Loc, SA);
    break;
  case R_PPC64_ADDR16_DS:
    // DS-form: the low two bits of the half-word belong to the instruction
    // and are preserved.
    checkInt<16>(SA, Type);
    write16be(Loc, (read16be(Loc) & 3) | (SA & ~3));
    break;
  case R_PPC64_ADDR16_HA:
    write16be(Loc, applyPPCHa(SA));
    break;
  case R_PPC64_ADDR16_HI:
    write16be(Loc, applyPPCHi(SA));
    break;
  case R_PPC64_ADDR16_HIGHER:
    write16be(Loc, applyPPCHigher(SA));
    break;
  case R_PPC64_ADDR16_HIGHERA:
    write16be(Loc, applyPPCHighera(SA));
    break;
  case R_PPC64_ADDR16_HIGHEST:
    write16be(Loc, applyPPCHighest(SA));
    break;
  case R_PPC64_ADDR16_HIGHESTA:
    write16be(Loc, applyPPCHighesta(SA));
    break;
  case R_PPC64_ADDR16_LO:
    write16be(Loc, applyPPCLo(SA));
    break;
  case R_PPC64_ADDR16_LO_DS:
    write16be(Loc, (read16be(Loc) & 3) | (applyPPCLo(SA) & ~3));
    break;
  case R_PPC64_ADDR32:
    checkInt<32>(SA, Type);
    write32be(Loc, SA);
    break;
  case R_PPC64_ADDR64:
    write64be(Loc, SA);
    break;
  case R_PPC64_REL16_HA:
    write16be(Loc, applyPPCHa(SA - P));
    break;
  case R_PPC64_REL16_HI:
    write16be(Loc, applyPPCHi(SA - P));
    break;
  case R_PPC64_REL16_LO:
    write16be(Loc, applyPPCLo(SA - P));
    break;
  case R_PPC64_REL24: {
    // If we have an undefined weak symbol, we might get here with a symbol
    // address of zero. That could overflow, but the code must be unreachable,
    // so don't bother doing anything at all.
    if (!SA)
      break;

    uint64_t PltStart = Out<ELF64BE>::Plt->getVA();
    uint64_t PltEnd = PltStart + Out<ELF64BE>::Plt->getSize();
    bool InPlt = PltStart <= SA && SA < PltEnd;

    if (!InPlt && Out<ELF64BE>::Opd) {
      // If this is a local call, and we currently have the address of a
      // function-descriptor, get the underlying code address instead.
      uint64_t OpdStart = Out<ELF64BE>::Opd->getVA();
      uint64_t OpdEnd = OpdStart + Out<ELF64BE>::Opd->getSize();
      bool InOpd = OpdStart <= SA && SA < OpdEnd;

      if (InOpd)
        SA = read64be(&Out<ELF64BE>::OpdBuf[SA - OpdStart]);
    }

    // The branch displacement occupies bits 2-25 of the I-form instruction;
    // all other bits are preserved.
    // NOTE(review): a 24-bit mask width is checked against a displacement
    // field that is word-scaled — confirm checkInt<24> is not stricter than
    // the ABI permits.
    uint32_t Mask = 0x03FFFFFC;
    checkInt<24>(SA - P, Type);
    write32be(Loc, (read32be(Loc) & ~Mask) | ((SA - P) & Mask));

    // A call through the PLT clobbers %r2; if the instruction after the call
    // is a nop, patch it into the TOC-restore instruction.
    uint32_t Nop = 0x60000000;
    if (InPlt && Loc + 8 <= BufEnd && read32be(Loc + 4) == Nop)
      write32be(Loc + 4, 0xe8410028); // ld %r2, 40(%r1)
    break;
  }
  case R_PPC64_REL32:
    checkInt<32>(SA - P, Type);
    write32be(Loc, SA - P);
    break;
  case R_PPC64_REL64:
    write64be(Loc, SA - P);
    break;
  case R_PPC64_TOC:
    write64be(Loc, SA);
    break;
  default:
    error("unrecognized reloc " + Twine(Type));
  }
}
1188 
AArch64TargetInfo::AArch64TargetInfo() {
  CopyReloc = R_AARCH64_COPY;
  IRelativeReloc = R_AARCH64_IRELATIVE;
  GotReloc = R_AARCH64_GLOB_DAT;
  PltReloc = R_AARCH64_JUMP_SLOT;
  // Default dynamic relocation for TLS GOT entries; see getTlsGotReloc.
  TlsGotReloc = R_AARCH64_TLS_TPREL64;
  LazyRelocations = true;
  // 16 bytes = the four instructions written by writePltEntry;
  // 32 bytes = the eight instructions written by writePltZeroEntry.
  PltEntrySize = 16;
  PltZeroEntrySize = 32;
}
1199 
1200 unsigned AArch64TargetInfo::getDynReloc(unsigned Type) const {
1201   if (Type == R_AARCH64_ABS32 || Type == R_AARCH64_ABS64)
1202     return Type;
1203   StringRef S = getELFRelocationTypeName(EM_AARCH64, Type);
1204   error("Relocation " + S + " cannot be used when making a shared object; "
1205                             "recompile with -fPIC.");
1206 }
1207 
// Initializes a .got.plt slot with the address of the start of the PLT —
// presumably so that the first call through the slot enters the lazy-binding
// code written by writePltZeroEntry.
void AArch64TargetInfo::writeGotPltEntry(uint8_t *Buf, uint64_t Plt) const {
  write64le(Buf, Out<ELF64LE>::Plt->getVA());
}
1211 
// Writes PLT[0], the 32-byte header stub that transfers control to the
// dynamic linker's resolver through .plt.got[2]. The adrp/ldr/add operands
// are filled in by the relocateOne calls below.
void AArch64TargetInfo::writePltZeroEntry(uint8_t *Buf, uint64_t GotEntryAddr,
                                          uint64_t PltEntryAddr) const {
  const uint8_t PltData[] = {
      0xf0, 0x7b, 0xbf, 0xa9, // stp  x16, x30, [sp,#-16]!
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[2]))
      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.plt.got[2]))]
      0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.plt.got[2]))
      0x20, 0x02, 0x1f, 0xd6, // br   x17
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5  // nop
  };
  memcpy(Buf, PltData, sizeof(PltData));

  // GotEntryAddr + 16 is .plt.got[2]; each P argument is the address of the
  // instruction being patched (PltEntryAddr plus its byte offset).
  relocateOne(Buf + 4, Buf + 8, R_AARCH64_ADR_PREL_PG_HI21, PltEntryAddr + 4,
              GotEntryAddr + 16);
  relocateOne(Buf + 8, Buf + 12, R_AARCH64_LDST64_ABS_LO12_NC, PltEntryAddr + 8,
              GotEntryAddr + 16);
  relocateOne(Buf + 12, Buf + 16, R_AARCH64_ADD_ABS_LO12_NC, PltEntryAddr + 12,
              GotEntryAddr + 16);
}
1233 
// Writes one 16-byte PLT entry: load the target address from the entry's
// .got.plt slot and branch to it. Operands are patched via relocateOne.
void AArch64TargetInfo::writePltEntry(uint8_t *Buf, uint64_t GotAddr,
                                      uint64_t GotEntryAddr,
                                      uint64_t PltEntryAddr, int32_t Index,
                                      unsigned RelOff) const {
  const uint8_t Inst[] = {
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[n]))
      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.plt.got[n]))]
      0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.plt.got[n]))
      0x20, 0x02, 0x1f, 0xd6  // br   x17
  };
  memcpy(Buf, Inst, sizeof(Inst));

  relocateOne(Buf, Buf + 4, R_AARCH64_ADR_PREL_PG_HI21, PltEntryAddr,
              GotEntryAddr);
  relocateOne(Buf + 4, Buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, PltEntryAddr + 4,
              GotEntryAddr);
  relocateOne(Buf + 8, Buf + 12, R_AARCH64_ADD_ABS_LO12_NC, PltEntryAddr + 8,
              GotEntryAddr);
}
1253 
1254 unsigned AArch64TargetInfo::getTlsGotReloc(unsigned Type) const {
1255   if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
1256       Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC)
1257     return Type;
1258   return TlsGotReloc;
1259 }
1260 
1261 bool AArch64TargetInfo::isTlsDynReloc(unsigned Type,
1262                                       const SymbolBody &S) const {
1263   return Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
1264          Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
1265 }
1266 
1267 bool AArch64TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const {
1268   if (Config->Shared)
1269     return false;
1270   switch (Type) {
1271   default:
1272     return false;
1273   case R_AARCH64_ABS16:
1274   case R_AARCH64_ABS32:
1275   case R_AARCH64_ABS64:
1276   case R_AARCH64_ADD_ABS_LO12_NC:
1277   case R_AARCH64_ADR_PREL_LO21:
1278   case R_AARCH64_ADR_PREL_PG_HI21:
1279   case R_AARCH64_LDST8_ABS_LO12_NC:
1280   case R_AARCH64_LDST16_ABS_LO12_NC:
1281   case R_AARCH64_LDST32_ABS_LO12_NC:
1282   case R_AARCH64_LDST64_ABS_LO12_NC:
1283   case R_AARCH64_LDST128_ABS_LO12_NC:
1284     if (auto *SS = dyn_cast<SharedSymbol<ELF64LE>>(&S))
1285       return SS->Sym.getType() == STT_OBJECT;
1286     return false;
1287   }
1288 }
1289 
1290 bool AArch64TargetInfo::relocNeedsGot(uint32_t Type,
1291                                       const SymbolBody &S) const {
1292   switch (Type) {
1293   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
1294   case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
1295   case R_AARCH64_ADR_GOT_PAGE:
1296   case R_AARCH64_LD64_GOT_LO12_NC:
1297     return true;
1298   default:
1299     return relocNeedsPlt(Type, S);
1300   }
1301 }
1302 
1303 bool AArch64TargetInfo::relocNeedsPlt(uint32_t Type,
1304                                       const SymbolBody &S) const {
1305   if (isGnuIFunc<ELF64LE>(S))
1306     return true;
1307   switch (Type) {
1308   default:
1309     return false;
1310   case R_AARCH64_CALL26:
1311   case R_AARCH64_CONDBR19:
1312   case R_AARCH64_JUMP26:
1313   case R_AARCH64_TSTBR14:
1314     return canBePreempted(&S, true);
1315   }
1316 }
1317 
// Patches the 21-bit immediate of an ADR/ADRP instruction in place:
// the low 2 bits of Imm go to the immlo field (instruction bits 29-30) and
// bits 2-20 go to the immhi field (bits 5-23). All other instruction bits
// are preserved.
static void updateAArch64Adr(uint8_t *L, uint64_t Imm) {
  uint32_t ImmLo = (Imm & 0x3) << 29;
  uint32_t ImmHi = ((Imm & 0x1FFFFC) >> 2) << 5;
  uint64_t Mask = (0x3 << 29) | (0x7FFFF << 5);
  write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
}
1324 
// Page(Expr) is the page address of the expression Expr, defined
// as (Expr & ~0xFFF). (This applies even if the machine page size
// supported by the platform has a different value.)
static uint64_t getAArch64Page(uint64_t Expr) {
  return Expr & ~uint64_t(0xFFF);
}
1331 
// Applies one AArch64 relocation of the given Type at Loc. P is the place
// (runtime address of Loc) and SA is S+A. The LO12 families extract bits
// 11:scale of SA and OR them into the instruction's imm12 field (bits
// 10-21), which is why each uses a different mask/shift pair.
void AArch64TargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd,
                                    uint32_t Type, uint64_t P, uint64_t SA,
                                    uint64_t ZA, uint8_t *PairedLoc) const {
  switch (Type) {
  case R_AARCH64_ABS16:
    checkIntUInt<16>(SA, Type);
    write16le(Loc, SA);
    break;
  case R_AARCH64_ABS32:
    checkIntUInt<32>(SA, Type);
    write32le(Loc, SA);
    break;
  case R_AARCH64_ABS64:
    write64le(Loc, SA);
    break;
  case R_AARCH64_ADD_ABS_LO12_NC:
    // This relocation stores 12 bits and there's no instruction
    // to do it. Instead, we do a 32 bits store of the value
    // of r_addend bitwise-or'ed Loc. This assumes that the addend
    // bits in Loc are zero.
    or32le(Loc, (SA & 0xFFF) << 10);
    break;
  case R_AARCH64_ADR_GOT_PAGE: {
    // Page-relative ADRP to the GOT entry's page.
    uint64_t X = getAArch64Page(SA) - getAArch64Page(P);
    checkInt<33>(X, Type);
    updateAArch64Adr(Loc, (X >> 12) & 0x1FFFFF); // X[32:12]
    break;
  }
  case R_AARCH64_ADR_PREL_LO21: {
    uint64_t X = SA - P;
    checkInt<21>(X, Type);
    updateAArch64Adr(Loc, X & 0x1FFFFF);
    break;
  }
  case R_AARCH64_ADR_PREL_PG_HI21:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: {
    uint64_t X = getAArch64Page(SA) - getAArch64Page(P);
    checkInt<33>(X, Type);
    updateAArch64Adr(Loc, (X >> 12) & 0x1FFFFF); // X[32:12]
    break;
  }
  case R_AARCH64_CALL26:
  case R_AARCH64_JUMP26: {
    // 26-bit word-scaled branch displacement (instruction bits 0-25).
    uint64_t X = SA - P;
    checkInt<28>(X, Type);
    or32le(Loc, (X & 0x0FFFFFFC) >> 2);
    break;
  }
  case R_AARCH64_CONDBR19: {
    // 19-bit word-scaled displacement placed at bits 5-23.
    uint64_t X = SA - P;
    checkInt<21>(X, Type);
    or32le(Loc, (X & 0x1FFFFC) << 3);
    break;
  }
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    checkAlignment<8>(SA, Type);
    or32le(Loc, (SA & 0xFF8) << 7);
    break;
  case R_AARCH64_LDST128_ABS_LO12_NC:
    or32le(Loc, (SA & 0x0FF8) << 6);
    break;
  case R_AARCH64_LDST16_ABS_LO12_NC:
    or32le(Loc, (SA & 0x0FFC) << 9);
    break;
  case R_AARCH64_LDST8_ABS_LO12_NC:
    or32le(Loc, (SA & 0xFFF) << 10);
    break;
  case R_AARCH64_LDST32_ABS_LO12_NC:
    or32le(Loc, (SA & 0xFFC) << 8);
    break;
  case R_AARCH64_LDST64_ABS_LO12_NC:
    or32le(Loc, (SA & 0xFF8) << 7);
    break;
  case R_AARCH64_PREL16:
    checkIntUInt<16>(SA - P, Type);
    write16le(Loc, SA - P);
    break;
  case R_AARCH64_PREL32:
    checkIntUInt<32>(SA - P, Type);
    write32le(Loc, SA - P);
    break;
  case R_AARCH64_PREL64:
    write64le(Loc, SA - P);
    break;
  case R_AARCH64_TSTBR14: {
    // 14-bit word-scaled displacement placed at bits 5-18.
    uint64_t X = SA - P;
    checkInt<16>(X, Type);
    or32le(Loc, (X & 0xFFFC) << 3);
    break;
  }
  default:
    error("unrecognized reloc " + Twine(Type));
  }
}
1427 
// The AMDGPU port only needs to produce valid ELF output; GOT/PLT and
// relocation application are unimplemented, and the hooks below abort if
// they are ever reached.
AMDGPUTargetInfo::AMDGPUTargetInfo() {}

void AMDGPUTargetInfo::writeGotPltEntry(uint8_t *Buf, uint64_t Plt) const {
  llvm_unreachable("not implemented");
}

void AMDGPUTargetInfo::writePltZeroEntry(uint8_t *Buf, uint64_t GotEntryAddr,
                                         uint64_t PltEntryAddr) const {
  llvm_unreachable("not implemented");
}

void AMDGPUTargetInfo::writePltEntry(uint8_t *Buf, uint64_t GotAddr,
                                     uint64_t GotEntryAddr,
                                     uint64_t PltEntryAddr, int32_t Index,
                                     unsigned RelOff) const {
  llvm_unreachable("not implemented");
}

bool AMDGPUTargetInfo::relocNeedsGot(uint32_t Type, const SymbolBody &S) const {
  return false;
}

bool AMDGPUTargetInfo::relocNeedsPlt(uint32_t Type, const SymbolBody &S) const {
  return false;
}

// Implementing relocations for AMDGPU is low priority since most programs
// don't use relocations now; in practice this is never called, which is why
// the port works with an unimplemented relocateOne.
void AMDGPUTargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                                   uint64_t P, uint64_t SA, uint64_t ZA,
                                   uint8_t *PairedLoc) const {
  llvm_unreachable("not implemented");
}
1463 
template <class ELFT> MipsTargetInfo<ELFT>::MipsTargetInfo() {
  // MIPS uses 64K pages.
  PageSize = 65536;
  // Two reserved GOT entries; see writeGotHeaderEntries below.
  GotHeaderEntriesNum = 2;
  RelativeReloc = R_MIPS_REL32;
}
1469 
1470 template <class ELFT>
1471 unsigned MipsTargetInfo<ELFT>::getDynReloc(unsigned Type) const {
1472   if (Type == R_MIPS_32 || Type == R_MIPS_64)
1473     return R_MIPS_REL32;
1474   StringRef S = getELFRelocationTypeName(EM_MIPS, Type);
1475   error("Relocation " + S + " cannot be used when making a shared object; "
1476                             "recompile with -fPIC.");
1477 }
1478 
// Fills in the two reserved GOT header entries (GotHeaderEntriesNum == 2).
template <class ELFT>
void MipsTargetInfo<ELFT>::writeGotHeaderEntries(uint8_t *Buf) const {
  typedef typename ELFFile<ELFT>::Elf_Off Elf_Off;
  auto *P = reinterpret_cast<Elf_Off *>(Buf);
  // Module pointer: the second entry gets its most-significant bit set.
  P[1] = ELFT::Is64Bits ? 0x8000000000000000 : 0x80000000;
  // NOTE(review): P[0] is left zero — presumably the lazy-resolver slot
  // filled in by the dynamic linker at load time; confirm against the
  // MIPS ABI.
}
1486 
// The MIPS port does not emit a PLT yet, so all PLT writers are empty.
template <class ELFT>
void MipsTargetInfo<ELFT>::writeGotPltEntry(uint8_t *Buf, uint64_t Plt) const {}
template <class ELFT>
void MipsTargetInfo<ELFT>::writePltZeroEntry(uint8_t *Buf, uint64_t GotEntryAddr,
                                       uint64_t PltEntryAddr) const {}
template <class ELFT>
void MipsTargetInfo<ELFT>::writePltEntry(uint8_t *Buf, uint64_t GotAddr,
                                         uint64_t GotEntryAddr,
                                         uint64_t PltEntryAddr, int32_t Index,
                                         unsigned RelOff) const {}
1497 
1498 template <class ELFT>
1499 bool MipsTargetInfo<ELFT>::relocNeedsGot(uint32_t Type,
1500                                          const SymbolBody &S) const {
1501   return Type == R_MIPS_GOT16 || Type == R_MIPS_CALL16;
1502 }
1503 
// The MIPS port never creates PLT entries (see the empty PLT writers above).
template <class ELFT>
bool MipsTargetInfo<ELFT>::relocNeedsPlt(uint32_t Type,
                                         const SymbolBody &S) const {
  return false;
}
1509 
// %high(V): the upper 16 bits of V, rounded so that (high << 16) plus the
// sign-extended low 16 bits reconstructs V exactly.
static uint16_t mipsHigh(uint64_t V) { return ((V + 0x8000) >> 16) & 0xffff; }
1511 
// Applies a PC-relative MIPS branch relocation whose instruction holds a
// BSIZE-bit, word (4-byte) scaled displacement in its low bits. The field's
// existing contents act as an in-place addend A.
template <endianness E, uint8_t BSIZE>
static void applyMipsPcReloc(uint8_t *Loc, uint32_t Type, uint64_t P,
                             uint64_t SA) {
  uint32_t Mask = ~(0xffffffff << BSIZE);
  uint32_t Instr = read32<E>(Loc);
  // Extract the implicit addend: low BSIZE bits, scaled by 4, sign-extended.
  int64_t A = SignExtend64<BSIZE + 2>((Instr & Mask) << 2);
  checkAlignment<4>(SA + A, Type);
  int64_t V = SA + A - P;
  // The scaled displacement spans BSIZE + 2 bits of byte offset.
  checkInt<BSIZE + 2>(V, Type);
  write32<E>(Loc, (Instr & ~Mask) | ((V >> 2) & Mask));
}
1523 
// Applies one MIPS relocation. For HI16/LO16 (and PCHI16/PCLO16) pairs the
// combined addend AHL is assembled from the high half stored in this
// instruction and the sign-extended low half stored in the paired
// instruction at PairedLoc.
template <class ELFT>
void MipsTargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint8_t *BufEnd,
                                       uint32_t Type, uint64_t P, uint64_t SA,
                                       uint64_t ZA, uint8_t *PairedLoc) const {
  const endianness E = ELFT::TargetEndianness;
  switch (Type) {
  case R_MIPS_32:
    // Word with in-place addend: add S+A to the existing contents.
    add32<E>(Loc, SA);
    break;
  case R_MIPS_CALL16:
  case R_MIPS_GOT16: {
    // GP-relative offset of the GOT entry; only GOT16 is range-checked.
    int64_t V = SA - getMipsGpAddr<ELFT>();
    if (Type == R_MIPS_GOT16)
      checkInt<16>(V, Type);
    write32<E>(Loc, (read32<E>(Loc) & 0xffff0000) | (V & 0xffff));
    break;
  }
  case R_MIPS_GPREL16: {
    uint32_t Instr = read32<E>(Loc);
    // In-place 16-bit addend plus S, relative to the GP value.
    int64_t V = SA + SignExtend64<16>(Instr & 0xffff) - getMipsGpAddr<ELFT>();
    checkInt<16>(V, Type);
    write32<E>(Loc, (Instr & 0xffff0000) | (V & 0xffff));
    break;
  }
  case R_MIPS_GPREL32:
    write32<E>(Loc, SA + int32_t(read32<E>(Loc)) - getMipsGpAddr<ELFT>());
    break;
  case R_MIPS_HI16: {
    uint32_t Instr = read32<E>(Loc);
    if (PairedLoc) {
      uint64_t AHL = ((Instr & 0xffff) << 16) +
                     SignExtend64<16>(read32<E>(PairedLoc) & 0xffff);
      write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(SA + AHL));
    } else {
      warning("Can't find matching R_MIPS_LO16 relocation for R_MIPS_HI16");
      write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(SA));
    }
    break;
  }
  case R_MIPS_JALR:
    // Ignore this optimization relocation for now
    break;
  case R_MIPS_LO16: {
    uint32_t Instr = read32<E>(Loc);
    int64_t AHL = SignExtend64<16>(Instr & 0xffff);
    write32<E>(Loc, (Instr & 0xffff0000) | ((SA + AHL) & 0xffff));
    break;
  }
  case R_MIPS_PC16:
    applyMipsPcReloc<E, 16>(Loc, Type, P, SA);
    break;
  case R_MIPS_PC19_S2:
    applyMipsPcReloc<E, 19>(Loc, Type, P, SA);
    break;
  case R_MIPS_PC21_S2:
    applyMipsPcReloc<E, 21>(Loc, Type, P, SA);
    break;
  case R_MIPS_PC26_S2:
    applyMipsPcReloc<E, 26>(Loc, Type, P, SA);
    break;
  case R_MIPS_PCHI16: {
    // PC-relative analog of HI16.
    uint32_t Instr = read32<E>(Loc);
    if (PairedLoc) {
      uint64_t AHL = ((Instr & 0xffff) << 16) +
                     SignExtend64<16>(read32<E>(PairedLoc) & 0xffff);
      write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(SA + AHL - P));
    } else {
      warning("Can't find matching R_MIPS_PCLO16 relocation for R_MIPS_PCHI16");
      write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(SA - P));
    }
    break;
  }
  case R_MIPS_PCLO16: {
    uint32_t Instr = read32<E>(Loc);
    int64_t AHL = SignExtend64<16>(Instr & 0xffff);
    write32<E>(Loc, (Instr & 0xffff0000) | ((SA + AHL - P) & 0xffff));
    break;
  }
  default:
    error("unrecognized reloc " + Twine(Type));
  }
}
1606 
1607 template <class ELFT>
1608 bool MipsTargetInfo<ELFT>::isHintReloc(uint32_t Type) const {
1609   return Type == R_MIPS_JALR;
1610 }
1611 
1612 template <class ELFT>
1613 bool MipsTargetInfo<ELFT>::isRelRelative(uint32_t Type) const {
1614   switch (Type) {
1615   default:
1616     return false;
1617   case R_MIPS_PC16:
1618   case R_MIPS_PC19_S2:
1619   case R_MIPS_PC21_S2:
1620   case R_MIPS_PC26_S2:
1621   case R_MIPS_PCHI16:
1622   case R_MIPS_PCLO16:
1623     return true;
1624   }
1625 }
1626 
1627 // _gp is a MIPS-specific ABI-defined symbol which points to
1628 // a location that is relative to GOT. This function returns
1629 // the value for the symbol.
1630 template <class ELFT> typename ELFFile<ELFT>::uintX_t getMipsGpAddr() {
1631   unsigned GPOffset = 0x7ff0;
1632   if (uint64_t V = Out<ELFT>::Got->getVA())
1633     return V + GPOffset;
1634   return 0;
1635 }
1636 
1637 bool needsMipsLocalGot(uint32_t Type, SymbolBody *Body) {
1638   // The R_MIPS_GOT16 relocation requires creation of entry in the local part
1639   // of GOT if its target is a local symbol or non-local symbol with 'local'
1640   // visibility.
1641   if (Type != R_MIPS_GOT16)
1642     return false;
1643   if (!Body)
1644     return true;
1645   uint8_t V = Body->getVisibility();
1646   if (V != STV_DEFAULT && V != STV_PROTECTED)
1647     return true;
1648   return !Config->Shared;
1649 }
1650 
// Explicit instantiations of getMipsGpAddr for every supported ELF flavor;
// uintX_t resolves to uint32_t for 32-bit targets and uint64_t for 64-bit.
template uint32_t getMipsGpAddr<ELF32LE>();
template uint32_t getMipsGpAddr<ELF32BE>();
template uint64_t getMipsGpAddr<ELF64LE>();
template uint64_t getMipsGpAddr<ELF64BE>();
1655 }
1656 }
1657