xref: /llvm-project-15.0.7/lld/ELF/Target.cpp (revision 61329522)
1 //===- Target.cpp ---------------------------------------------------------===//
2 //
3 //                             The LLVM Linker
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Machine-specific things, such as applying relocations, creation of
11 // GOT or PLT entries, etc., are handled in this file.
12 //
13 // Refer the ELF spec for the single letter variables, S, A or P, used
14 // in this file.
15 //
16 // Some functions defined in this file has "relaxTls" as part of their names.
17 // They do peephole optimization for TLS variables by rewriting instructions.
18 // They are not part of the ABI but optional optimization, so you can skip
19 // them if you are not interested in how TLS variables are optimized.
20 // See the following paper for the details.
21 //
22 //   Ulrich Drepper, ELF Handling For Thread-Local Storage
23 //   http://www.akkadia.org/drepper/tls.pdf
24 //
25 //===----------------------------------------------------------------------===//
26 
27 #include "Target.h"
28 #include "Error.h"
29 #include "InputFiles.h"
30 #include "Memory.h"
31 #include "OutputSections.h"
32 #include "SymbolTable.h"
33 #include "Symbols.h"
34 #include "SyntheticSections.h"
35 #include "Thunks.h"
36 #include "Writer.h"
37 #include "llvm/ADT/ArrayRef.h"
38 #include "llvm/Object/ELF.h"
39 #include "llvm/Support/ELF.h"
40 #include "llvm/Support/Endian.h"
41 
42 using namespace llvm;
43 using namespace llvm::object;
44 using namespace llvm::support::endian;
45 using namespace llvm::ELF;
46 
47 std::string lld::toString(uint32_t Type) {
48   StringRef S = getELFRelocationTypeName(elf::Config->EMachine, Type);
49   if (S == "Unknown")
50     return ("Unknown (" + Twine(Type) + ")").str();
51   return S;
52 }
53 
54 namespace lld {
55 namespace elf {
56 
// The singleton describing the machine being targeted; created via
// createTarget() below once Config->EMachine is known.
TargetInfo *Target;
58 
59 static void or32le(uint8_t *P, int32_t V) { write32le(P, read32le(P) | V); }
60 static void or32be(uint8_t *P, int32_t V) { write32be(P, read32be(P) | V); }
61 
// Find the input section whose bytes in the output buffer contain Loc and
// return a human-readable location prefix (ending in ": ") for diagnostics.
// Returns the empty string if Loc falls inside no known input section.
template <class ELFT> static std::string getErrorLoc(uint8_t *Loc) {
  for (InputSectionBase *D : InputSections) {
    auto *IS = dyn_cast_or_null<InputSection>(D);
    if (!IS || !IS->OutSec)
      continue;

    // Start of this section's image inside the output buffer.
    uint8_t *ISLoc = cast<OutputSection>(IS->OutSec)->Loc + IS->OutSecOff;
    if (ISLoc <= Loc && Loc < ISLoc + IS->getSize())
      return IS->template getLocation<ELFT>(Loc - ISLoc) + ": ";
  }
  return "";
}
74 
75 static std::string getErrorLocation(uint8_t *Loc) {
76   switch (Config->EKind) {
77   case ELF32LEKind:
78     return getErrorLoc<ELF32LE>(Loc);
79   case ELF32BEKind:
80     return getErrorLoc<ELF32BE>(Loc);
81   case ELF64LEKind:
82     return getErrorLoc<ELF64LE>(Loc);
83   case ELF64BEKind:
84     return getErrorLoc<ELF64BE>(Loc);
85   default:
86     llvm_unreachable("unknown ELF type");
87   }
88 }
89 
90 template <unsigned N>
91 static void checkInt(uint8_t *Loc, int64_t V, uint32_t Type) {
92   if (!isInt<N>(V))
93     error(getErrorLocation(Loc) + "relocation " + toString(Type) +
94           " out of range");
95 }
96 
97 template <unsigned N>
98 static void checkUInt(uint8_t *Loc, uint64_t V, uint32_t Type) {
99   if (!isUInt<N>(V))
100     error(getErrorLocation(Loc) + "relocation " + toString(Type) +
101           " out of range");
102 }
103 
104 template <unsigned N>
105 static void checkIntUInt(uint8_t *Loc, uint64_t V, uint32_t Type) {
106   if (!isInt<N>(V) && !isUInt<N>(V))
107     error(getErrorLocation(Loc) + "relocation " + toString(Type) +
108           " out of range");
109 }
110 
111 template <unsigned N>
112 static void checkAlignment(uint8_t *Loc, uint64_t V, uint32_t Type) {
113   if ((V & (N - 1)) != 0)
114     error(getErrorLocation(Loc) + "improper alignment for relocation " +
115           toString(Type));
116 }
117 
118 namespace {
// 32-bit x86 (i386/IAMCU) target: PLT/GOT entry writers, relocation
// application, and the full set of TLS relaxations (GD/LD/IE to LE etc.).
class X86TargetInfo final : public TargetInfo {
public:
  X86TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  int64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
  void writeGotPltHeader(uint8_t *Buf) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;

  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                          RelExpr Expr) const override;
  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};
143 
// x86-64 target. Templated on ELFT because it also serves a 32-bit ELF
// kind (see createTarget's EM_X86_64 dispatch).
template <class ELFT> class X86_64TargetInfo final : public TargetInfo {
public:
  X86_64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  bool isPicRel(uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPltHeader(uint8_t *Buf) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;

  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                          RelExpr Expr) const override;
  void relaxGot(uint8_t *Loc, uint64_t Val) const override;
  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;

private:
  // Shared helper for rewriting GOT-relative loads in non-PIC output.
  void relaxGotNoPic(uint8_t *Loc, uint64_t Val, uint8_t Op,
                     uint8_t ModRm) const;
};
171 
// 32-bit PowerPC target: relocation classification and application only.
class PPCTargetInfo final : public TargetInfo {
public:
  PPCTargetInfo();
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
};
178 
// 64-bit PowerPC target: adds a PLT entry writer on top of relocation
// handling.
class PPC64TargetInfo final : public TargetInfo {
public:
  PPC64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};
187 
// AArch64 target: page-based addressing (usesOnlyLowPageBits) plus
// GD/IE-to-LE TLS relaxations.
class AArch64TargetInfo final : public TargetInfo {
public:
  AArch64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  bool isPicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  bool usesOnlyLowPageBits(uint32_t Type) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                          RelExpr Expr) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};
206 
// AMDGPU target: relocation classification and application only.
class AMDGPUTargetInfo final : public TargetInfo {
public:
  AMDGPUTargetInfo();
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
};
213 
// 32-bit ARM target: PLT/GOT writers, mapping symbols for PLT code
// (addPltSymbols/addPltHeaderSymbols), and thunks for ARM/Thumb
// interworking and range extension (needsThunk).
class ARMTargetInfo final : public TargetInfo {
public:
  ARMTargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  bool isPicRel(uint32_t Type) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  int64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void addPltSymbols(InputSectionBase *IS, uint64_t Off) const override;
  void addPltHeaderSymbols(InputSectionBase *ISD) const override;
  bool needsThunk(RelExpr Expr, uint32_t RelocType, const InputFile *File,
                  const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};
235 
// MIPS target, templated over all four ELF kinds (32/64-bit, LE/BE);
// also uses thunks (needsThunk) and page-based GOT addressing.
template <class ELFT> class MipsTargetInfo final : public TargetInfo {
public:
  MipsTargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  int64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
  bool isPicRel(uint32_t Type) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  bool needsThunk(RelExpr Expr, uint32_t RelocType, const InputFile *File,
                  const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  bool usesOnlyLowPageBits(uint32_t Type) const override;
};
254 } // anonymous namespace
255 
// Instantiate the TargetInfo subclass matching the target machine
// (Config->EMachine), further dispatching on the ELF kind where a target
// has per-width/endianness variants.
TargetInfo *createTarget() {
  switch (Config->EMachine) {
  case EM_386:
  case EM_IAMCU:
    return make<X86TargetInfo>();
  case EM_AARCH64:
    return make<AArch64TargetInfo>();
  case EM_AMDGPU:
    return make<AMDGPUTargetInfo>();
  case EM_ARM:
    return make<ARMTargetInfo>();
  case EM_MIPS:
    switch (Config->EKind) {
    case ELF32LEKind:
      return make<MipsTargetInfo<ELF32LE>>();
    case ELF32BEKind:
      return make<MipsTargetInfo<ELF32BE>>();
    case ELF64LEKind:
      return make<MipsTargetInfo<ELF64LE>>();
    case ELF64BEKind:
      return make<MipsTargetInfo<ELF64BE>>();
    default:
      fatal("unsupported MIPS target");
    }
  case EM_PPC:
    return make<PPCTargetInfo>();
  case EM_PPC64:
    return make<PPC64TargetInfo>();
  case EM_X86_64:
    // EM_X86_64 with a 32-bit ELF kind is presumably the x32 (ILP32) ABI.
    if (Config->EKind == ELF32LEKind)
      return make<X86_64TargetInfo<ELF32LE>>();
    return make<X86_64TargetInfo<ELF64LE>>();
  }
  fatal("unknown target machine");
}
291 
292 TargetInfo::~TargetInfo() {}
293 
// Read the addend stored in the relocated location itself (REL-format
// targets override this); by default relocations carry no implicit addend.
int64_t TargetInfo::getImplicitAddend(const uint8_t *Buf, uint32_t Type) const {
  return 0;
}
297 
// Whether relocation Type consumes only the low, in-page bits of the
// value (overridden by AArch64 and MIPS); default is no.
bool TargetInfo::usesOnlyLowPageBits(uint32_t Type) const { return false; }
299 
// Whether a reference via RelocType to S requires a thunk (overridden by
// ARM and MIPS); by default no target needs thunks.
bool TargetInfo::needsThunk(RelExpr Expr, uint32_t RelocType,
                            const InputFile *File, const SymbolBody &S) const {
  return false;
}
304 
// TLS Initial-Exec relocation classification; default: none.
bool TargetInfo::isTlsInitialExecRel(uint32_t Type) const { return false; }
306 
// TLS Local-Dynamic relocation classification; default: none.
bool TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const { return false; }
308 
// TLS Global-Dynamic relocation classification; default: none.
bool TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const { return false; }
310 
// By default an ifunc .got.plt entry is written the same way as a regular
// one; x86 and ARM override this.
void TargetInfo::writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const {
  writeGotPlt(Buf, S);
}
314 
// Hook letting a target substitute a different relaxation expression
// (possibly after inspecting the instruction bytes in Data); the default
// keeps Expr unchanged.
RelExpr TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                                    RelExpr Expr) const {
  return Expr;
}
319 
// Reached only if a target reported a GOT relocation as relaxable but did
// not implement the rewrite.
void TargetInfo::relaxGot(uint8_t *Loc, uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}
323 
// GD-to-LE TLS relaxation stub; targets that claim this relaxation must
// override it.
void TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}
328 
// GD-to-IE TLS relaxation stub; targets that claim this relaxation must
// override it.
void TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}
333 
// IE-to-LE TLS relaxation stub; targets that claim this relaxation must
// override it.
void TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}
338 
// LD-to-LE TLS relaxation stub; targets that claim this relaxation must
// override it.
void TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}
343 
// Relocation type constants and PLT/GOT geometry for 32-bit x86.
X86TargetInfo::X86TargetInfo() {
  CopyRel = R_386_COPY;
  GotRel = R_386_GLOB_DAT;
  PltRel = R_386_JUMP_SLOT;
  IRelativeRel = R_386_IRELATIVE;
  RelativeRel = R_386_RELATIVE;
  TlsGotRel = R_386_TLS_TPOFF;
  TlsModuleIndexRel = R_386_TLS_DTPMOD32;
  TlsOffsetRel = R_386_TLS_DTPOFF32;
  // 4-byte GOT slots; 16-byte PLT header and entries.
  GotEntrySize = 4;
  GotPltEntrySize = 4;
  PltEntrySize = 16;
  PltHeaderSize = 16;
  // A GD TLS code sequence spans two relocations; relaxing it consumes
  // both, so the scanner skips the second one.
  TlsGdRelaxSkip = 2;
}
359 
// Classify how an i386 relocation's value is computed: absolute,
// PC-relative, GOT-relative (x86 addresses the GOT from its end), or one
// of the TLS models. Unknown types are reported as errors.
RelExpr X86TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
  switch (Type) {
  case R_386_8:
  case R_386_16:
  case R_386_32:
  case R_386_TLS_LDO_32:
    return R_ABS;
  case R_386_TLS_GD:
    return R_TLSGD;
  case R_386_TLS_LDM:
    return R_TLSLD;
  case R_386_PLT32:
    return R_PLT_PC;
  case R_386_PC8:
  case R_386_PC16:
  case R_386_PC32:
    return R_PC;
  case R_386_GOTPC:
    return R_GOTONLY_PC_FROM_END;
  case R_386_TLS_IE:
    return R_GOT;
  case R_386_GOT32:
  case R_386_GOT32X:
  case R_386_TLS_GOTIE:
    return R_GOT_FROM_END;
  case R_386_GOTOFF:
    return R_GOTREL_FROM_END;
  case R_386_TLS_LE:
    return R_TLS;
  case R_386_TLS_LE_32:
    return R_NEG_TLS;
  case R_386_NONE:
    return R_NONE;
  default:
    error(toString(S.File) + ": unknown relocation type: " + toString(Type));
    return R_HINT;
  }
}
398 
399 RelExpr X86TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
400                                        RelExpr Expr) const {
401   switch (Expr) {
402   default:
403     return Expr;
404   case R_RELAX_TLS_GD_TO_IE:
405     return R_RELAX_TLS_GD_TO_IE_END;
406   case R_RELAX_TLS_GD_TO_LE:
407     return R_RELAX_TLS_GD_TO_LE_NEG;
408   }
409 }
410 
// The first .got.plt entry holds the address of _DYNAMIC (see the
// comment in X86_64TargetInfo::writeGotPltHeader).
void X86TargetInfo::writeGotPltHeader(uint8_t *Buf) const {
  write32le(Buf, In<ELF32LE>::Dynamic->getVA());
}
414 
// Write one .got.plt slot for lazy binding.
void X86TargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &S) const {
  // Entries in .got.plt initially points back to the corresponding
  // PLT entries with a fixed offset to skip the first instruction.
  write32le(Buf, S.getPltVA() + 6);
}
420 
// Ifunc slots are not lazily bound; they hold the resolver directly.
void X86TargetInfo::writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const {
  // An x86 entry is the address of the ifunc resolver function.
  write32le(Buf, S.getVA());
}
425 
426 uint32_t X86TargetInfo::getDynRel(uint32_t Type) const {
427   if (Type == R_386_TLS_LE)
428     return R_386_TLS_TPOFF;
429   if (Type == R_386_TLS_LE_32)
430     return R_386_TLS_TPOFF32;
431   return Type;
432 }
433 
// i386 General-Dynamic TLS model relocation.
bool X86TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
  return Type == R_386_TLS_GD;
}
437 
// i386 Local-Dynamic TLS model relocations.
bool X86TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const {
  return Type == R_386_TLS_LDO_32 || Type == R_386_TLS_LDM;
}
441 
// i386 Initial-Exec TLS model relocations.
bool X86TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
  return Type == R_386_TLS_IE || Type == R_386_TLS_GOTIE;
}
445 
// Write the 16-byte PLT header (PLT[0]): push the second .got.plt slot and
// jump through the third, using %ebx-relative addressing when PIC and
// absolute addresses otherwise.
void X86TargetInfo::writePltHeader(uint8_t *Buf) const {
  // Executable files and shared object files have
  // separate procedure linkage tables.
  if (Config->Pic) {
    const uint8_t V[] = {
        0xff, 0xb3, 0x04, 0x00, 0x00, 0x00, // pushl 4(%ebx)
        0xff, 0xa3, 0x08, 0x00, 0x00, 0x00, // jmp   *8(%ebx)
        0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
    };
    memcpy(Buf, V, sizeof(V));
    return;
  }

  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushl (GOT+4)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp   *(GOT+8)
      0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
  };
  memcpy(Buf, PltData, sizeof(PltData));
  // Patch in the absolute addresses of .got.plt slots 1 and 2.
  uint32_t Got = In<ELF32LE>::GotPlt->getVA();
  write32le(Buf + 2, Got + 4);
  write32le(Buf + 8, Got + 8);
}
469 
// Write one 16-byte PLT entry: an indirect jump through the symbol's
// .got.plt slot, then a push of the relocation offset and a jump back to
// PLT[0] for the lazy-binding path.
void X86TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                             uint64_t PltEntryAddr, int32_t Index,
                             unsigned RelOff) const {
  const uint8_t Inst[] = {
      0xff, 0x00, 0x00, 0x00, 0x00, 0x00, // jmp *foo_in_GOT|*foo@GOT(%ebx)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushl $reloc_offset
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmp .PLT0@PC
  };
  memcpy(Buf, Inst, sizeof(Inst));

  // jmp *foo@GOT(%ebx) or jmp *foo_in_GOT
  Buf[1] = Config->Pic ? 0xa3 : 0x25;
  // %ebx-relative GOT slot for shared output, absolute address otherwise.
  uint32_t Got = In<ELF32LE>::GotPlt->getVA();
  write32le(Buf + 2, Config->Shared ? GotEntryAddr - Got : GotEntryAddr);
  write32le(Buf + 7, RelOff);
  // PC-relative displacement from the end of this entry back to PLT[0].
  write32le(Buf + 12, -Index * PltEntrySize - PltHeaderSize - 16);
}
487 
// i386 uses REL relocations, so read the sign-extended addend stored in
// the relocated location itself. Types not listed carry no addend here.
int64_t X86TargetInfo::getImplicitAddend(const uint8_t *Buf,
                                         uint32_t Type) const {
  switch (Type) {
  default:
    return 0;
  case R_386_8:
  case R_386_PC8:
    return SignExtend64<8>(*Buf);
  case R_386_16:
  case R_386_PC16:
    return SignExtend64<16>(read16le(Buf));
  case R_386_32:
  case R_386_GOT32:
  case R_386_GOT32X:
  case R_386_GOTOFF:
  case R_386_GOTPC:
  case R_386_PC32:
  case R_386_PLT32:
  case R_386_TLS_LE:
    return SignExtend64<32>(read32le(Buf));
  }
}
510 
// Range-check Val and write it into the location for relocation Type.
void X86TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  // R_386_{PC,}{8,16} are not part of the i386 psABI, but they are
  // being used for some 16-bit programs such as boot loaders, so
  // we want to support them.
  switch (Type) {
  case R_386_8:
    checkUInt<8>(Loc, Val, Type);
    *Loc = Val;
    break;
  case R_386_PC8:
    checkInt<8>(Loc, Val, Type);
    *Loc = Val;
    break;
  case R_386_16:
    checkUInt<16>(Loc, Val, Type);
    write16le(Loc, Val);
    break;
  case R_386_PC16:
    checkInt<16>(Loc, Val, Type);
    write16le(Loc, Val);
    break;
  default:
    // All remaining supported i386 relocations are 32 bits wide.
    checkInt<32>(Loc, Val, Type);
    write32le(Loc, Val);
  }
}
538 
// Rewrite a General-Dynamic TLS sequence into Local-Exec form.
void X86TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Convert
  //   leal x@tlsgd(, %ebx, 1),
  //   call __tls_get_addr@plt
  // to
  //   movl %gs:0,%eax
  //   subl $x@ntpoff,%eax
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x81, 0xe8, 0x00, 0x00, 0x00, 0x00  // subl 0(%ebx), %eax
  };
  // Loc points at the leal's 32-bit displacement; the instruction itself
  // starts 3 bytes earlier, which anchors the rewrite.
  memcpy(Loc - 3, Inst, sizeof(Inst));
  // Patch the subl immediate (at Loc - 3 + 8 == Loc + 5).
  relocateOne(Loc + 5, R_386_32, Val);
}
554 
// Rewrite a General-Dynamic TLS sequence into Initial-Exec form.
void X86TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Convert
  //   leal x@tlsgd(, %ebx, 1),
  //   call __tls_get_addr@plt
  // to
  //   movl %gs:0, %eax
  //   addl x@gotntpoff(%ebx), %eax
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x03, 0x83, 0x00, 0x00, 0x00, 0x00  // addl 0(%ebx), %eax
  };
  // As in relaxTlsGdToLe, Loc - 3 is the start of the leal instruction.
  memcpy(Loc - 3, Inst, sizeof(Inst));
  // Patch the addl displacement (at Loc - 3 + 8 == Loc + 5).
  relocateOne(Loc + 5, R_386_32, Val);
}
570 
// In some conditions, relocations can be optimized to avoid using GOT.
// This function does that for Initial Exec to Local Exec case.
void X86TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Ulrich's document section 6.2 says that @gotntpoff can
  // be used with MOVL or ADDL instructions.
  // @indntpoff is similar to @gotntpoff, but for use in
  // position dependent code.
  //
  // Loc[-1] is the ModR/M byte of the instruction being rewritten; its
  // reg field identifies the destination register.
  uint8_t Reg = (Loc[-1] >> 3) & 7;

  if (Type == R_386_TLS_IE) {
    if (Loc[-1] == 0xa1) {
      // "movl foo@indntpoff,%eax" -> "movl $foo,%eax"
      // This case is different from the generic case below because
      // this is a 5 byte instruction while below is 6 bytes.
      Loc[-1] = 0xb8;
    } else if (Loc[-2] == 0x8b) {
      // "movl foo@indntpoff,%reg" -> "movl $foo,%reg"
      Loc[-2] = 0xc7;
      Loc[-1] = 0xc0 | Reg;
    } else {
      // "addl foo@indntpoff,%reg" -> "addl $foo,%reg"
      Loc[-2] = 0x81;
      Loc[-1] = 0xc0 | Reg;
    }
  } else {
    assert(Type == R_386_TLS_GOTIE);
    if (Loc[-2] == 0x8b) {
      // "movl foo@gottpoff(%rip),%reg" -> "movl $foo,%reg"
      Loc[-2] = 0xc7;
      Loc[-1] = 0xc0 | Reg;
    } else {
      // "addl foo@gotntpoff(%rip),%reg" -> "leal foo(%reg),%reg"
      Loc[-2] = 0x8d;
      Loc[-1] = 0x80 | (Reg << 3) | Reg;
    }
  }
  // Write the thread-pointer offset into the rewritten operand.
  relocateOne(Loc, R_386_TLS_LE, Val);
}
610 
// Rewrite a Local-Dynamic TLS sequence into Local-Exec form.
void X86TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // The @dtpoff operand of the sequence only needs its value replaced;
  // no instruction rewriting is required for it.
  if (Type == R_386_TLS_LDO_32) {
    relocateOne(Loc, R_386_TLS_LE, Val);
    return;
  }

  // Convert
  //   leal foo(%reg),%eax
  //   call ___tls_get_addr
  // to
  //   movl %gs:0,%eax
  //   nop
  //   leal 0(%esi,1),%esi
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0,%eax
      0x90,                               // nop
      0x8d, 0x74, 0x26, 0x00              // leal 0(%esi,1),%esi
  };
  memcpy(Loc - 2, Inst, sizeof(Inst));
}
632 
// Relocation type constants and PLT/GOT geometry for x86-64.
template <class ELFT> X86_64TargetInfo<ELFT>::X86_64TargetInfo() {
  CopyRel = R_X86_64_COPY;
  GotRel = R_X86_64_GLOB_DAT;
  PltRel = R_X86_64_JUMP_SLOT;
  RelativeRel = R_X86_64_RELATIVE;
  IRelativeRel = R_X86_64_IRELATIVE;
  TlsGotRel = R_X86_64_TPOFF64;
  TlsModuleIndexRel = R_X86_64_DTPMOD64;
  TlsOffsetRel = R_X86_64_DTPOFF64;
  // 8-byte GOT slots; 16-byte PLT header and entries.
  GotEntrySize = 8;
  GotPltEntrySize = 8;
  PltEntrySize = 16;
  PltHeaderSize = 16;
  // A GD TLS sequence spans two relocations; relaxing it consumes both.
  TlsGdRelaxSkip = 2;
  // Align to the large page size (known as a superpage or huge page).
  // FreeBSD automatically promotes large, superpage-aligned allocations.
  DefaultImageBase = 0x200000;
}
651 
// Classify how an x86-64 relocation's value is computed: absolute,
// PC-relative, GOT-relative, size, or one of the TLS models. Unknown
// types are reported as errors.
template <class ELFT>
RelExpr X86_64TargetInfo<ELFT>::getRelExpr(uint32_t Type,
                                           const SymbolBody &S) const {
  switch (Type) {
  case R_X86_64_8:
  case R_X86_64_16:
  case R_X86_64_32:
  case R_X86_64_32S:
  case R_X86_64_64:
  case R_X86_64_DTPOFF32:
  case R_X86_64_DTPOFF64:
    return R_ABS;
  case R_X86_64_TPOFF32:
    return R_TLS;
  case R_X86_64_TLSLD:
    return R_TLSLD_PC;
  case R_X86_64_TLSGD:
    return R_TLSGD_PC;
  case R_X86_64_SIZE32:
  case R_X86_64_SIZE64:
    return R_SIZE;
  case R_X86_64_PLT32:
    return R_PLT_PC;
  case R_X86_64_PC32:
  case R_X86_64_PC64:
    return R_PC;
  case R_X86_64_GOT32:
  case R_X86_64_GOT64:
    return R_GOT_FROM_END;
  case R_X86_64_GOTPCREL:
  case R_X86_64_GOTPCRELX:
  case R_X86_64_REX_GOTPCRELX:
  case R_X86_64_GOTTPOFF:
    return R_GOT_PC;
  case R_X86_64_NONE:
    return R_NONE;
  default:
    error(toString(S.File) + ": unknown relocation type: " + toString(Type));
    return R_HINT;
  }
}
693 
// Write the .got.plt header slot.
template <class ELFT>
void X86_64TargetInfo<ELFT>::writeGotPltHeader(uint8_t *Buf) const {
  // The first entry holds the value of _DYNAMIC. It is not clear why that is
  // required, but it is documented in the psabi and the glibc dynamic linker
  // seems to use it (note that this is relevant for linking ld.so, not any
  // other program).
  write64le(Buf, In<ELFT>::Dynamic->getVA());
}
702 
// Write one .got.plt slot for lazy binding: the address of the push
// instruction in the symbol's PLT entry.
template <class ELFT>
void X86_64TargetInfo<ELFT>::writeGotPlt(uint8_t *Buf,
                                         const SymbolBody &S) const {
  // See comments in X86TargetInfo::writeGotPlt.
  write32le(Buf, S.getPltVA() + 6);
}
709 
// Write the 16-byte PLT header (PLT[0]). The two RIP-relative
// displacements are patched so that, from the end of each instruction,
// they reach GOTPLT+8 and GOTPLT+16 respectively.
template <class ELFT>
void X86_64TargetInfo<ELFT>::writePltHeader(uint8_t *Buf) const {
  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushq GOT+8(%rip)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *GOT+16(%rip)
      0x0f, 0x1f, 0x40, 0x00              // nopl 0x0(rax)
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint64_t Got = In<ELFT>::GotPlt->getVA();
  uint64_t Plt = In<ELFT>::Plt->getVA();
  write32le(Buf + 2, Got - Plt + 2); // GOT+8
  write32le(Buf + 8, Got - Plt + 4); // GOT+16
}
723 
// Write one 16-byte PLT entry: a RIP-relative jump through the symbol's
// .got.plt slot, then a push of the relocation index and a jump back to
// PLT[0] for the lazy-binding path.
template <class ELFT>
void X86_64TargetInfo<ELFT>::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                      uint64_t PltEntryAddr, int32_t Index,
                                      unsigned RelOff) const {
  const uint8_t Inst[] = {
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmpq *got(%rip)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushq <relocation index>
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmpq plt[0]
  };
  memcpy(Buf, Inst, sizeof(Inst));

  // RIP-relative displacement to the GOT slot, from the end of the jmpq.
  write32le(Buf + 2, GotEntryAddr - PltEntryAddr - 6);
  write32le(Buf + 7, Index);
  // PC-relative displacement from the end of this entry back to PLT[0].
  write32le(Buf + 12, -Index * PltEntrySize - PltHeaderSize - 16);
}
739 
740 template <class ELFT>
741 bool X86_64TargetInfo<ELFT>::isPicRel(uint32_t Type) const {
742   return Type != R_X86_64_PC32 && Type != R_X86_64_32;
743 }
744 
// x86-64 Initial-Exec TLS model relocation.
template <class ELFT>
bool X86_64TargetInfo<ELFT>::isTlsInitialExecRel(uint32_t Type) const {
  return Type == R_X86_64_GOTTPOFF;
}
749 
// x86-64 General-Dynamic TLS model relocation.
template <class ELFT>
bool X86_64TargetInfo<ELFT>::isTlsGlobalDynamicRel(uint32_t Type) const {
  return Type == R_X86_64_TLSGD;
}
754 
// x86-64 Local-Dynamic TLS model relocations.
template <class ELFT>
bool X86_64TargetInfo<ELFT>::isTlsLocalDynamicRel(uint32_t Type) const {
  return Type == R_X86_64_DTPOFF32 || Type == R_X86_64_DTPOFF64 ||
         Type == R_X86_64_TLSLD;
}
760 
// Rewrite a General-Dynamic TLS sequence into Local-Exec form.
template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                            uint64_t Val) const {
  // Convert
  //   .byte 0x66
  //   leaq x@tlsgd(%rip), %rdi
  //   .word 0x6666
  //   rex64
  //   call __tls_get_addr@plt
  // to
  //   mov %fs:0x0,%rax
  //   lea x@tpoff,%rax
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
      0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00              // lea x@tpoff,%rax
  };
  // Loc points at the leaq displacement; the rewrite starts at the 0x66
  // prefix, 4 bytes earlier.
  memcpy(Loc - 4, Inst, sizeof(Inst));
  // The original code used a PC-relative relocation, so we have to
  // compensate for the -4 it had in the addend.
  relocateOne(Loc + 8, R_X86_64_TPOFF32, Val + 4);
}
782 
// Rewrite a General-Dynamic TLS sequence into Initial-Exec form.
template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
                                            uint64_t Val) const {
  // Convert
  //   .byte 0x66
  //   leaq x@tlsgd(%rip), %rdi
  //   .word 0x6666
  //   rex64
  //   call __tls_get_addr@plt
  // to
  //   mov %fs:0x0,%rax
  //   addq x@gottpoff(%rip),%rax
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
      0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00 // addq x@gottpoff(%rip),%rax
  };
  // As in relaxTlsGdToLe, the rewrite starts 4 bytes before Loc.
  memcpy(Loc - 4, Inst, sizeof(Inst));
  // Both code sequences are PC relatives, but since we are moving the constant
  // forward by 8 bytes we have to subtract the value by 8.
  relocateOne(Loc + 8, R_X86_64_PC32, Val - 8);
}
804 
// In some conditions, R_X86_64_GOTTPOFF relocation can be optimized to
// R_X86_64_TPOFF32 so that it does not use GOT.
template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                            uint64_t Val) const {
  // Inst points at the REX prefix of the instruction being rewritten;
  // RegSlot at its ModR/M byte, whose reg field identifies the register.
  uint8_t *Inst = Loc - 3;
  uint8_t Reg = Loc[-1] >> 3;
  uint8_t *RegSlot = Loc - 1;

  // Note that ADD with RSP or R12 is converted to ADD instead of LEA
  // because LEA with these registers needs 4 bytes to encode and thus
  // wouldn't fit the space.

  if (memcmp(Inst, "\x48\x03\x25", 3) == 0) {
    // "addq foo@gottpoff(%rip),%rsp" -> "addq $foo,%rsp"
    memcpy(Inst, "\x48\x81\xc4", 3);
  } else if (memcmp(Inst, "\x4c\x03\x25", 3) == 0) {
    // "addq foo@gottpoff(%rip),%r12" -> "addq $foo,%r12"
    memcpy(Inst, "\x49\x81\xc4", 3);
  } else if (memcmp(Inst, "\x4c\x03", 2) == 0) {
    // "addq foo@gottpoff(%rip),%r[8-15]" -> "leaq foo(%r[8-15]),%r[8-15]"
    memcpy(Inst, "\x4d\x8d", 2);
    *RegSlot = 0x80 | (Reg << 3) | Reg;
  } else if (memcmp(Inst, "\x48\x03", 2) == 0) {
    // "addq foo@gottpoff(%rip),%reg -> "leaq foo(%reg),%reg"
    memcpy(Inst, "\x48\x8d", 2);
    *RegSlot = 0x80 | (Reg << 3) | Reg;
  } else if (memcmp(Inst, "\x4c\x8b", 2) == 0) {
    // "movq foo@gottpoff(%rip),%r[8-15]" -> "movq $foo,%r[8-15]"
    memcpy(Inst, "\x49\xc7", 2);
    *RegSlot = 0xc0 | Reg;
  } else if (memcmp(Inst, "\x48\x8b", 2) == 0) {
    // "movq foo@gottpoff(%rip),%reg" -> "movq $foo,%reg"
    memcpy(Inst, "\x48\xc7", 2);
    *RegSlot = 0xc0 | Reg;
  } else {
    error(getErrorLocation(Loc - 3) +
          "R_X86_64_GOTTPOFF must be used in MOVQ or ADDQ instructions only");
  }

  // The original code used a PC relative relocation.
  // Need to compensate for the -4 it had in the addend.
  relocateOne(Loc, R_X86_64_TPOFF32, Val + 4);
}
849 
template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                            uint64_t Val) const {
  // Convert
  //   leaq bar@tlsld(%rip), %rdi
  //   callq __tls_get_addr@PLT
  //   leaq bar@dtpoff(%rax), %rcx
  // to
  //   .word 0x6666
  //   .byte 0x66
  //   mov %fs:0,%rax
  //   leaq bar@tpoff(%rax), %rcx
  //
  // DTPOFF references to the variable itself: after LD->LE relaxation the
  // module offset is the TP offset, so they are written out directly.
  if (Type == R_X86_64_DTPOFF64) {
    write64le(Loc, Val);
    return;
  }
  if (Type == R_X86_64_DTPOFF32) {
    relocateOne(Loc, R_X86_64_TPOFF32, Val);
    return;
  }

  // Otherwise overwrite the 12 bytes starting at the leaq opcode (Loc - 3)
  // with the sequence above; the 0x66 prefixes pad it to the original size.
  const uint8_t Inst[] = {
      0x66, 0x66,                                          // .word 0x6666
      0x66,                                                // .byte 0x66
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
  };
  memcpy(Loc - 3, Inst, sizeof(Inst));
}
878 
// Apply an x86-64 relocation of the given Type at Loc with the already
// computed value Val. Types not produced by the scan phase are a linker bug,
// hence llvm_unreachable rather than a user-facing error.
template <class ELFT>
void X86_64TargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint32_t Type,
                                         uint64_t Val) const {
  switch (Type) {
  case R_X86_64_8:
    checkUInt<8>(Loc, Val, Type);
    *Loc = Val;
    break;
  case R_X86_64_16:
    checkUInt<16>(Loc, Val, Type);
    write16le(Loc, Val);
    break;
  case R_X86_64_32:
    // Plain _32 is zero-extended, hence the unsigned check...
    checkUInt<32>(Loc, Val, Type);
    write32le(Loc, Val);
    break;
  // ...while all of the following are sign-extended 32-bit fields (PC
  // relative displacements and small-code-model absolute/GOT offsets).
  case R_X86_64_32S:
  case R_X86_64_TPOFF32:
  case R_X86_64_GOT32:
  case R_X86_64_GOTPCREL:
  case R_X86_64_GOTPCRELX:
  case R_X86_64_REX_GOTPCRELX:
  case R_X86_64_PC32:
  case R_X86_64_GOTTPOFF:
  case R_X86_64_PLT32:
  case R_X86_64_TLSGD:
  case R_X86_64_TLSLD:
  case R_X86_64_DTPOFF32:
  case R_X86_64_SIZE32:
    checkInt<32>(Loc, Val, Type);
    write32le(Loc, Val);
    break;
  // Full 64-bit fields: no range check required.
  case R_X86_64_64:
  case R_X86_64_DTPOFF64:
  case R_X86_64_GLOB_DAT:
  case R_X86_64_PC64:
  case R_X86_64_SIZE64:
  case R_X86_64_GOT64:
    write64le(Loc, Val);
    break;
  default:
    llvm_unreachable("unexpected relocation");
  }
}
923 
924 template <class ELFT>
925 RelExpr X86_64TargetInfo<ELFT>::adjustRelaxExpr(uint32_t Type,
926                                                 const uint8_t *Data,
927                                                 RelExpr RelExpr) const {
928   if (Type != R_X86_64_GOTPCRELX && Type != R_X86_64_REX_GOTPCRELX)
929     return RelExpr;
930   const uint8_t Op = Data[-2];
931   const uint8_t ModRm = Data[-1];
932 
933   // FIXME: When PIC is disabled and foo is defined locally in the
934   // lower 32 bit address space, memory operand in mov can be converted into
935   // immediate operand. Otherwise, mov must be changed to lea. We support only
936   // latter relaxation at this moment.
937   if (Op == 0x8b)
938     return R_RELAX_GOT_PC;
939 
940   // Relax call and jmp.
941   if (Op == 0xff && (ModRm == 0x15 || ModRm == 0x25))
942     return R_RELAX_GOT_PC;
943 
944   // Relaxation of test, adc, add, and, cmp, or, sbb, sub, xor.
945   // If PIC then no relaxation is available.
946   // We also don't relax test/binop instructions without REX byte,
947   // they are 32bit operations and not common to have.
948   assert(Type == R_X86_64_REX_GOTPCRELX);
949   return Config->Pic ? RelExpr : R_RELAX_GOT_PC_NOPIC;
950 }
951 
// A subset of relaxations can only be applied for no-PIC. This method
// handles such relaxations. Instructions encoding information was taken from:
// "Intel 64 and IA-32 Architectures Software Developer's Manual V2"
// (http://www.intel.com/content/dam/www/public/us/en/documents/manuals/
//    64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf)
//
// Loc points at the 4-byte displacement; Loc[-3..-1] are the REX prefix,
// opcode and ModR/M byte of the instruction being rewritten in place.
template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxGotNoPic(uint8_t *Loc, uint64_t Val,
                                           uint8_t Op, uint8_t ModRm) const {
  const uint8_t Rex = Loc[-3];
  // Convert "test %reg, foo@GOTPCREL(%rip)" to "test $foo, %reg".
  if (Op == 0x85) {
    // See "TEST-Logical Compare" (4-428 Vol. 2B),
    // TEST r/m64, r64 uses "full" ModR / M byte (no opcode extension).

    // ModR/M byte has form XX YYY ZZZ, where
    // YYY is MODRM.reg(register 2), ZZZ is MODRM.rm(register 1).
    // XX has different meanings:
    // 00: The operand's memory address is in reg1.
    // 01: The operand's memory address is reg1 + a byte-sized displacement.
    // 10: The operand's memory address is reg1 + a word-sized displacement.
    // 11: The operand is reg1 itself.
    // If an instruction requires only one operand, the unused reg2 field
    // holds extra opcode bits rather than a register code
    // 0xC0 == 11 000 000 binary.
    // 0x38 == 00 111 000 binary.
    // We transfer reg2 to reg1 here as operand.
    // See "2.1.3 ModR/M and SIB Bytes" (Vol. 2A 2-3).
    Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3; // ModR/M byte.

    // Change opcode from TEST r/m64, r64 to TEST r/m64, imm32
    // See "TEST-Logical Compare" (4-428 Vol. 2B).
    Loc[-2] = 0xf7;

    // Move R bit to the B bit in REX byte.
    // REX byte is encoded as 0100WRXB, where
    // 0100 is 4bit fixed pattern.
    // REX.W When 1, a 64-bit operand size is used. Otherwise, when 0, the
    //   default operand size is used (which is 32-bit for most but not all
    //   instructions).
    // REX.R This 1-bit value is an extension to the MODRM.reg field.
    // REX.X This 1-bit value is an extension to the SIB.index field.
    // REX.B This 1-bit value is an extension to the MODRM.rm field or the
    // SIB.base field.
    // See "2.2.1.2 More on REX Prefix Fields " (2-8 Vol. 2A).
    Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
    relocateOne(Loc, R_X86_64_PC32, Val);
    return;
  }

  // If we are here then we need to relax the adc, add, and, cmp, or, sbb, sub
  // or xor operations.

  // Convert "binop foo@GOTPCREL(%rip), %reg" to "binop $foo, %reg".
  // Logic is close to one for test instruction above, but we also
  // write opcode extension here, see below for details.
  // (Op & 0x3c) recovers the opcode-extension bits from the original binop
  // opcode and places them into MODRM.reg.
  Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3 | (Op & 0x3c); // ModR/M byte.

  // Primary opcode is 0x81, opcode extension is one of:
  // 000b = ADD, 001b is OR, 010b is ADC, 011b is SBB,
  // 100b is AND, 101b is SUB, 110b is XOR, 111b is CMP.
  // This value was wrote to MODRM.reg in a line above.
  // See "3.2 INSTRUCTIONS (A-M)" (Vol. 2A 3-15),
  // "INSTRUCTION SET REFERENCE, N-Z" (Vol. 2B 4-1) for
  // descriptions about each operation.
  Loc[-2] = 0x81;
  Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
  relocateOne(Loc, R_X86_64_PC32, Val);
}
1020 
// Relax a GOTPCRELX load at Loc (which points at the 4-byte displacement)
// into a direct reference to the symbol, avoiding the GOT indirection.
template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxGot(uint8_t *Loc, uint64_t Val) const {
  const uint8_t Op = Loc[-2];
  const uint8_t ModRm = Loc[-1];

  // Convert "mov foo@GOTPCREL(%rip),%reg" to "lea foo(%rip),%reg".
  if (Op == 0x8b) {
    Loc[-2] = 0x8d;
    relocateOne(Loc, R_X86_64_PC32, Val);
    return;
  }

  if (Op != 0xff) {
    // We are relaxing a rip relative to an absolute, so compensate
    // for the old -4 addend.
    assert(!Config->Pic);
    relaxGotNoPic(Loc, Val + 4, Op, ModRm);
    return;
  }

  // Convert call/jmp instructions.
  if (ModRm == 0x15) {
    // ABI says we can convert "call *foo@GOTPCREL(%rip)" to "nop; call foo".
    // Instead we convert to "addr32 call foo" where addr32 is an instruction
    // prefix. That makes result expression to be a single instruction.
    Loc[-2] = 0x67; // addr32 prefix
    Loc[-1] = 0xe8; // call
    relocateOne(Loc, R_X86_64_PC32, Val);
    return;
  }

  // Convert "jmp *foo@GOTPCREL(%rip)" to "jmp foo; nop".
  // jmp doesn't return, so it is fine to use nop here, it is just a stub.
  // The original 6-byte instruction spans Loc-2..Loc+3; the shortened
  // direct jmp is 5 bytes, so its disp32 starts one byte earlier (Loc-1,
  // with +1 compensating the shift) and Loc[3] becomes the trailing nop.
  assert(ModRm == 0x25);
  Loc[-2] = 0xe9; // jmp
  Loc[3] = 0x90;  // nop
  relocateOne(Loc - 1, R_X86_64_PC32, Val + 1);
}
1059 
// Relocation masks following the #lo(value), #hi(value), #ha(value),
// #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
// document. The "a" (adjusted) forms add 0x8000 so that a subsequent
// sign-extending low-half add produces the intended value.
static uint16_t applyPPCLo(uint64_t V) {
  return static_cast<uint16_t>(V);
}
static uint16_t applyPPCHi(uint64_t V) {
  return static_cast<uint16_t>(V >> 16);
}
static uint16_t applyPPCHa(uint64_t V) {
  return static_cast<uint16_t>((V + 0x8000) >> 16);
}
static uint16_t applyPPCHigher(uint64_t V) {
  return static_cast<uint16_t>(V >> 32);
}
static uint16_t applyPPCHighera(uint64_t V) {
  return static_cast<uint16_t>((V + 0x8000) >> 32);
}
static uint16_t applyPPCHighest(uint64_t V) {
  return static_cast<uint16_t>(V >> 48);
}
static uint16_t applyPPCHighesta(uint64_t V) {
  return static_cast<uint16_t>((V + 0x8000) >> 48);
}
1071 
1072 PPCTargetInfo::PPCTargetInfo() {}
1073 
// Apply a PPC32 relocation of the given Type at Loc with value Val.
// Unrecognized types are reported as user-facing errors since they come
// from input object files.
void PPCTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  switch (Type) {
  case R_PPC_ADDR16_HA:
    // Adjusted high half: pairs with a sign-extending low-half add.
    write16be(Loc, applyPPCHa(Val));
    break;
  case R_PPC_ADDR16_LO:
    write16be(Loc, applyPPCLo(Val));
    break;
  case R_PPC_ADDR32:
  case R_PPC_REL32:
    write32be(Loc, Val);
    break;
  case R_PPC_REL24:
    // Branch displacement: bits 2-25 of the instruction word.
    // NOTE(review): unlike the PPC64 REL24 case, there is no range check
    // here, so an out-of-range branch is silently truncated — confirm.
    or32be(Loc, Val & 0x3FFFFFC);
    break;
  default:
    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
  }
}
1094 
1095 RelExpr PPCTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
1096   switch (Type) {
1097   case R_PPC_REL24:
1098   case R_PPC_REL32:
1099     return R_PC;
1100   default:
1101     return R_ABS;
1102   }
1103 }
1104 
PPC64TargetInfo::PPC64TargetInfo() {
  PltRel = GotRel = R_PPC64_GLOB_DAT;
  RelativeRel = R_PPC64_RELATIVE;
  GotEntrySize = 8;
  GotPltEntrySize = 8;
  // 32-byte PLT entries, no PLT header: each entry dispatches through the
  // TOC on its own (see writePlt below).
  PltEntrySize = 32;
  PltHeaderSize = 0;

  // We need 64K pages (at least under glibc/Linux, the loader won't
  // set different permissions on a finer granularity than that).
  DefaultMaxPageSize = 65536;

  // The PPC64 ELF ABI v1 spec, says:
  //
  //   It is normally desirable to put segments with different characteristics
  //   in separate 256 Mbyte portions of the address space, to give the
  //   operating system full paging flexibility in the 64-bit address space.
  //
  // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
  // use 0x10000000 as the starting address.
  DefaultImageBase = 0x10000000;
}
1127 
1128 static uint64_t PPC64TocOffset = 0x8000;
1129 
// Returns the value of the TOC base pointer (%r2) for this link.
uint64_t getPPC64TocBase() {
  // The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The
  // TOC starts where the first of these sections starts. We always create a
  // .got when we see a relocation that uses it, so for us the start is always
  // the .got.
  uint64_t TocVA = In<ELF64BE>::Got->getVA();

  // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
  // thus permitting a full 64 Kbytes segment. Note that the glibc startup
  // code (crt1.o) assumes that you can get from the TOC base to the
  // start of the .toc section with only a single (signed) 16-bit relocation.
  return TocVA + PPC64TocOffset;
}
1143 
1144 RelExpr PPC64TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
1145   switch (Type) {
1146   default:
1147     return R_ABS;
1148   case R_PPC64_TOC16:
1149   case R_PPC64_TOC16_DS:
1150   case R_PPC64_TOC16_HA:
1151   case R_PPC64_TOC16_HI:
1152   case R_PPC64_TOC16_LO:
1153   case R_PPC64_TOC16_LO_DS:
1154     return R_GOTREL;
1155   case R_PPC64_TOC:
1156     return R_PPC_TOC;
1157   case R_PPC64_REL24:
1158     return R_PPC_PLT_OPD;
1159   }
1160 }
1161 
// Write one 32-byte PPC64 (ELFv1) PLT entry at Buf. The entry saves the
// caller's TOC pointer, then loads the function descriptor found at the
// symbol's GOT slot (addressed TOC-relative via Off) and branches to it.
void PPC64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                               uint64_t PltEntryAddr, int32_t Index,
                               unsigned RelOff) const {
  uint64_t Off = GotEntryAddr - getPPC64TocBase();

  // FIXME: What we should do, in theory, is get the offset of the function
  // descriptor in the .opd section, and use that as the offset from %r2 (the
  // TOC-base pointer). Instead, we have the GOT-entry offset, and that will
  // be a pointer to the function descriptor in the .opd section. Using
  // this scheme is simpler, but requires an extra indirection per PLT dispatch.

  write32be(Buf, 0xf8410028);                       // std %r2, 40(%r1)
  write32be(Buf + 4, 0x3d620000 | applyPPCHa(Off)); // addis %r11, %r2, X@ha
  write32be(Buf + 8, 0xe98b0000 | applyPPCLo(Off)); // ld %r12, X@l(%r11)
  write32be(Buf + 12, 0xe96c0000);                  // ld %r11,0(%r12)
  write32be(Buf + 16, 0x7d6903a6);                  // mtctr %r11
  write32be(Buf + 20, 0xe84c0008);                  // ld %r2,8(%r12)
  write32be(Buf + 24, 0xe96c0010);                  // ld %r11,16(%r12)
  write32be(Buf + 28, 0x4e800420);                  // bctr
}
1182 
1183 static std::pair<uint32_t, uint64_t> toAddr16Rel(uint32_t Type, uint64_t Val) {
1184   uint64_t V = Val - PPC64TocOffset;
1185   switch (Type) {
1186   case R_PPC64_TOC16:
1187     return {R_PPC64_ADDR16, V};
1188   case R_PPC64_TOC16_DS:
1189     return {R_PPC64_ADDR16_DS, V};
1190   case R_PPC64_TOC16_HA:
1191     return {R_PPC64_ADDR16_HA, V};
1192   case R_PPC64_TOC16_HI:
1193     return {R_PPC64_ADDR16_HI, V};
1194   case R_PPC64_TOC16_LO:
1195     return {R_PPC64_ADDR16_LO, V};
1196   case R_PPC64_TOC16_LO_DS:
1197     return {R_PPC64_ADDR16_LO_DS, V};
1198   default:
1199     return {Type, Val};
1200   }
1201 }
1202 
1203 void PPC64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
1204                                   uint64_t Val) const {
1205   // For a TOC-relative relocation, proceed in terms of the corresponding
1206   // ADDR16 relocation type.
1207   std::tie(Type, Val) = toAddr16Rel(Type, Val);
1208 
1209   switch (Type) {
1210   case R_PPC64_ADDR14: {
1211     checkAlignment<4>(Loc, Val, Type);
1212     // Preserve the AA/LK bits in the branch instruction
1213     uint8_t AALK = Loc[3];
1214     write16be(Loc + 2, (AALK & 3) | (Val & 0xfffc));
1215     break;
1216   }
1217   case R_PPC64_ADDR16:
1218     checkInt<16>(Loc, Val, Type);
1219     write16be(Loc, Val);
1220     break;
1221   case R_PPC64_ADDR16_DS:
1222     checkInt<16>(Loc, Val, Type);
1223     write16be(Loc, (read16be(Loc) & 3) | (Val & ~3));
1224     break;
1225   case R_PPC64_ADDR16_HA:
1226   case R_PPC64_REL16_HA:
1227     write16be(Loc, applyPPCHa(Val));
1228     break;
1229   case R_PPC64_ADDR16_HI:
1230   case R_PPC64_REL16_HI:
1231     write16be(Loc, applyPPCHi(Val));
1232     break;
1233   case R_PPC64_ADDR16_HIGHER:
1234     write16be(Loc, applyPPCHigher(Val));
1235     break;
1236   case R_PPC64_ADDR16_HIGHERA:
1237     write16be(Loc, applyPPCHighera(Val));
1238     break;
1239   case R_PPC64_ADDR16_HIGHEST:
1240     write16be(Loc, applyPPCHighest(Val));
1241     break;
1242   case R_PPC64_ADDR16_HIGHESTA:
1243     write16be(Loc, applyPPCHighesta(Val));
1244     break;
1245   case R_PPC64_ADDR16_LO:
1246     write16be(Loc, applyPPCLo(Val));
1247     break;
1248   case R_PPC64_ADDR16_LO_DS:
1249   case R_PPC64_REL16_LO:
1250     write16be(Loc, (read16be(Loc) & 3) | (applyPPCLo(Val) & ~3));
1251     break;
1252   case R_PPC64_ADDR32:
1253   case R_PPC64_REL32:
1254     checkInt<32>(Loc, Val, Type);
1255     write32be(Loc, Val);
1256     break;
1257   case R_PPC64_ADDR64:
1258   case R_PPC64_REL64:
1259   case R_PPC64_TOC:
1260     write64be(Loc, Val);
1261     break;
1262   case R_PPC64_REL24: {
1263     uint32_t Mask = 0x03FFFFFC;
1264     checkInt<24>(Loc, Val, Type);
1265     write32be(Loc, (read32be(Loc) & ~Mask) | (Val & Mask));
1266     break;
1267   }
1268   default:
1269     error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
1270   }
1271 }
1272 
AArch64TargetInfo::AArch64TargetInfo() {
  CopyRel = R_AARCH64_COPY;
  RelativeRel = R_AARCH64_RELATIVE;
  IRelativeRel = R_AARCH64_IRELATIVE;
  GotRel = R_AARCH64_GLOB_DAT;
  PltRel = R_AARCH64_JUMP_SLOT;
  TlsDescRel = R_AARCH64_TLSDESC;
  TlsGotRel = R_AARCH64_TLS_TPREL64;
  GotEntrySize = 8;
  GotPltEntrySize = 8;
  // 16-byte entries after a 32-byte header (see writePltHeader/writePlt).
  PltEntrySize = 16;
  PltHeaderSize = 32;
  DefaultMaxPageSize = 65536;

  // It doesn't seem to be documented anywhere, but tls on aarch64 uses variant
  // 1 of the tls structures and the tcb size is 16.
  TcbSize = 16;
}
1291 
// Map an AArch64 relocation type onto the linker's internal expression kind
// (absolute, PC-relative, page-relative, GOT, PLT, TLS...), which drives how
// the relocation value is later computed.
RelExpr AArch64TargetInfo::getRelExpr(uint32_t Type,
                                      const SymbolBody &S) const {
  switch (Type) {
  default:
    return R_ABS;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    return R_TLSDESC_PAGE;
  case R_AARCH64_TLSDESC_LD64_LO12_NC:
  case R_AARCH64_TLSDESC_ADD_LO12_NC:
    return R_TLSDESC;
  case R_AARCH64_TLSDESC_CALL:
    return R_TLSDESC_CALL;
  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    return R_TLS;
  case R_AARCH64_CALL26:
  case R_AARCH64_CONDBR19:
  case R_AARCH64_JUMP26:
  case R_AARCH64_TSTBR14:
    // Branches may need to be routed through the PLT.
    return R_PLT_PC;
  case R_AARCH64_PREL16:
  case R_AARCH64_PREL32:
  case R_AARCH64_PREL64:
  case R_AARCH64_ADR_PREL_LO21:
    return R_PC;
  case R_AARCH64_ADR_PREL_PG_HI21:
    return R_PAGE_PC;
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    return R_GOT;
  case R_AARCH64_ADR_GOT_PAGE:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    return R_GOT_PAGE_PC;
  case R_AARCH64_NONE:
    return R_NONE;
  }
}
1329 
1330 RelExpr AArch64TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
1331                                            RelExpr Expr) const {
1332   if (Expr == R_RELAX_TLS_GD_TO_IE) {
1333     if (Type == R_AARCH64_TLSDESC_ADR_PAGE21)
1334       return R_RELAX_TLS_GD_TO_IE_PAGE_PC;
1335     return R_RELAX_TLS_GD_TO_IE_ABS;
1336   }
1337   return Expr;
1338 }
1339 
1340 bool AArch64TargetInfo::usesOnlyLowPageBits(uint32_t Type) const {
1341   switch (Type) {
1342   default:
1343     return false;
1344   case R_AARCH64_ADD_ABS_LO12_NC:
1345   case R_AARCH64_LD64_GOT_LO12_NC:
1346   case R_AARCH64_LDST128_ABS_LO12_NC:
1347   case R_AARCH64_LDST16_ABS_LO12_NC:
1348   case R_AARCH64_LDST32_ABS_LO12_NC:
1349   case R_AARCH64_LDST64_ABS_LO12_NC:
1350   case R_AARCH64_LDST8_ABS_LO12_NC:
1351   case R_AARCH64_TLSDESC_ADD_LO12_NC:
1352   case R_AARCH64_TLSDESC_LD64_LO12_NC:
1353   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
1354     return true;
1355   }
1356 }
1357 
1358 bool AArch64TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
1359   return Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
1360          Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
1361 }
1362 
1363 bool AArch64TargetInfo::isPicRel(uint32_t Type) const {
1364   return Type == R_AARCH64_ABS32 || Type == R_AARCH64_ABS64;
1365 }
1366 
// Write the initial value of a .got.plt entry: the address of the start of
// the PLT (presumably so the first call lands in the lazy-binding header —
// matches the ARM variant below).
void AArch64TargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
  write64le(Buf, In<ELF64LE>::Plt->getVA());
}
1370 
// Page(Expr) is the page address of the expression Expr, defined
// as (Expr & ~0xFFF). (This applies even if the machine page size
// supported by the platform has a different value.)
uint64_t getAArch64Page(uint64_t Expr) {
  const uint64_t PageMask = 0xFFF;
  return Expr & ~PageMask;
}
1377 
// Write the 32-byte PLT header. The adrp/ldr/add immediates are patched
// afterwards via relocateOne so the stub loads .got.plt[2] (the entry
// reserved for the dynamic loader) and jumps to it.
void AArch64TargetInfo::writePltHeader(uint8_t *Buf) const {
  const uint8_t PltData[] = {
      0xf0, 0x7b, 0xbf, 0xa9, // stp	x16, x30, [sp,#-16]!
      0x10, 0x00, 0x00, 0x90, // adrp	x16, Page(&(.plt.got[2]))
      0x11, 0x02, 0x40, 0xf9, // ldr	x17, [x16, Offset(&(.plt.got[2]))]
      0x10, 0x02, 0x00, 0x91, // add	x16, x16, Offset(&(.plt.got[2]))
      0x20, 0x02, 0x1f, 0xd6, // br	x17
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5  // nop
  };
  memcpy(Buf, PltData, sizeof(PltData));

  // Got + 16 is &.got.plt[2]; Plt + 4 is the address of the adrp itself.
  uint64_t Got = In<ELF64LE>::GotPlt->getVA();
  uint64_t Plt = In<ELF64LE>::Plt->getVA();
  relocateOne(Buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
              getAArch64Page(Got + 16) - getAArch64Page(Plt + 4));
  relocateOne(Buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, Got + 16);
  relocateOne(Buf + 12, R_AARCH64_ADD_ABS_LO12_NC, Got + 16);
}
1398 
// Write one 16-byte PLT entry: load the target address from the symbol's
// .got.plt slot (GotEntryAddr) and branch to it. The adrp/ldr/add
// immediates are patched below with relocateOne.
void AArch64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                 uint64_t PltEntryAddr, int32_t Index,
                                 unsigned RelOff) const {
  const uint8_t Inst[] = {
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[n]))
      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.plt.got[n]))]
      0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.plt.got[n]))
      0x20, 0x02, 0x1f, 0xd6  // br   x17
  };
  memcpy(Buf, Inst, sizeof(Inst));

  relocateOne(Buf, R_AARCH64_ADR_PREL_PG_HI21,
              getAArch64Page(GotEntryAddr) - getAArch64Page(PltEntryAddr));
  relocateOne(Buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, GotEntryAddr);
  relocateOne(Buf + 8, R_AARCH64_ADD_ABS_LO12_NC, GotEntryAddr);
}
1415 
1416 static void write32AArch64Addr(uint8_t *L, uint64_t Imm) {
1417   uint32_t ImmLo = (Imm & 0x3) << 29;
1418   uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
1419   uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
1420   write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
1421 }
1422 
// Return the bits [Start, End] from Val shifted Start bits.
// For instance, getBits(0xF0, 4, 8) returns 0xF.
static uint64_t getBits(uint64_t Val, int Start, int End) {
  const int Width = End + 1 - Start;
  const uint64_t Mask = (uint64_t(1) << Width) - 1;
  return (Val >> Start) & Mask;
}
1429 
// Update the immediate field in a AARCH64 ldr, str, and add instruction.
// The 12-bit immediate occupies instruction bits [21:10].
// NOTE(review): or32le ORs the bits in rather than replacing them, so the
// field is presumably zero in the encoded instruction — confirm.
static void or32AArch64Imm(uint8_t *L, uint64_t Imm) {
  or32le(L, (Imm & 0xFFF) << 10);
}
1434 
// Apply an AArch64 relocation of the given Type at Loc with value Val.
// Instruction immediates are OR-ed into place (fields assumed zero in the
// encoded instruction); data relocations are written whole.
void AArch64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                    uint64_t Val) const {
  switch (Type) {
  case R_AARCH64_ABS16:
  case R_AARCH64_PREL16:
    checkIntUInt<16>(Loc, Val, Type);
    write16le(Loc, Val);
    break;
  case R_AARCH64_ABS32:
  case R_AARCH64_PREL32:
    checkIntUInt<32>(Loc, Val, Type);
    write32le(Loc, Val);
    break;
  case R_AARCH64_ABS64:
  case R_AARCH64_GLOB_DAT:
  case R_AARCH64_PREL64:
    write64le(Loc, Val);
    break;
  case R_AARCH64_ADD_ABS_LO12_NC:
    // _NC ("no check") relocations take the low 12 bits, no overflow check.
    or32AArch64Imm(Loc, Val);
    break;
  case R_AARCH64_ADR_GOT_PAGE:
  case R_AARCH64_ADR_PREL_PG_HI21:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    // ADRP: the 21-bit immediate selects a 4 KiB page, hence >> 12 and a
    // 33-bit (21 + 12) range check.
    checkInt<33>(Loc, Val, Type);
    write32AArch64Addr(Loc, Val >> 12);
    break;
  case R_AARCH64_ADR_PREL_LO21:
    checkInt<21>(Loc, Val, Type);
    write32AArch64Addr(Loc, Val);
    break;
  case R_AARCH64_CALL26:
  case R_AARCH64_JUMP26:
    // 26-bit word displacement: 28-bit byte range.
    checkInt<28>(Loc, Val, Type);
    or32le(Loc, (Val & 0x0FFFFFFC) >> 2);
    break;
  case R_AARCH64_CONDBR19:
    checkInt<21>(Loc, Val, Type);
    or32le(Loc, (Val & 0x1FFFFC) << 3);
    break;
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
  case R_AARCH64_TLSDESC_LD64_LO12_NC:
    // 64-bit LDR: the 12-bit offset is scaled by 8, so Val must be 8-aligned.
    checkAlignment<8>(Loc, Val, Type);
    or32le(Loc, (Val & 0xFF8) << 7);
    break;
  // LDST relocations scale the low 12 bits by the access size.
  case R_AARCH64_LDST8_ABS_LO12_NC:
    or32AArch64Imm(Loc, getBits(Val, 0, 11));
    break;
  case R_AARCH64_LDST16_ABS_LO12_NC:
    or32AArch64Imm(Loc, getBits(Val, 1, 11));
    break;
  case R_AARCH64_LDST32_ABS_LO12_NC:
    or32AArch64Imm(Loc, getBits(Val, 2, 11));
    break;
  case R_AARCH64_LDST64_ABS_LO12_NC:
    or32AArch64Imm(Loc, getBits(Val, 3, 11));
    break;
  case R_AARCH64_LDST128_ABS_LO12_NC:
    or32AArch64Imm(Loc, getBits(Val, 4, 11));
    break;
  // MOVW group: each picks one 16-bit slice of Val into bits [20:5].
  case R_AARCH64_MOVW_UABS_G0_NC:
    or32le(Loc, (Val & 0xFFFF) << 5);
    break;
  case R_AARCH64_MOVW_UABS_G1_NC:
    // (Val >> 16) << 5, fused into one mask-and-shift; same below.
    or32le(Loc, (Val & 0xFFFF0000) >> 11);
    break;
  case R_AARCH64_MOVW_UABS_G2_NC:
    or32le(Loc, (Val & 0xFFFF00000000) >> 27);
    break;
  case R_AARCH64_MOVW_UABS_G3:
    or32le(Loc, (Val & 0xFFFF000000000000) >> 43);
    break;
  case R_AARCH64_TSTBR14:
    checkInt<16>(Loc, Val, Type);
    or32le(Loc, (Val & 0xFFFC) << 3);
    break;
  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
    checkInt<24>(Loc, Val, Type);
    or32AArch64Imm(Loc, Val >> 12);
    break;
  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
  case R_AARCH64_TLSDESC_ADD_LO12_NC:
    or32AArch64Imm(Loc, Val);
    break;
  default:
    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
  }
}
1525 
void AArch64TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                       uint64_t Val) const {
  // TLSDESC Global-Dynamic relocation are in the form:
  //   adrp    x0, :tlsdesc:v             [R_AARCH64_TLSDESC_ADR_PAGE21]
  //   ldr     x1, [x0, #:tlsdesc_lo12:v  [R_AARCH64_TLSDESC_LD64_LO12_NC]
  //   add     x0, x0, :tlsdesc_los:v     [_AARCH64_TLSDESC_ADD_LO12_NC]
  //   .tlsdesccall                       [R_AARCH64_TLSDESC_CALL]
  //   blr     x1
  // And it can optimized to:
  //   movz    x0, #0x0, lsl #16
  //   movk    x0, #0x10
  //   nop
  //   nop
  // The movz/movk pair materializes at most a 32-bit TP offset, hence the
  // unsigned 32-bit range check on Val.
  checkUInt<32>(Loc, Val, Type);

  switch (Type) {
  case R_AARCH64_TLSDESC_ADD_LO12_NC:
  case R_AARCH64_TLSDESC_CALL:
    write32le(Loc, 0xd503201f); // nop
    return;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    // High half of the offset, always into register x0 (encoded Rd = 0).
    write32le(Loc, 0xd2a00000 | (((Val >> 16) & 0xffff) << 5)); // movz
    return;
  case R_AARCH64_TLSDESC_LD64_LO12_NC:
    write32le(Loc, 0xf2800000 | ((Val & 0xffff) << 5)); // movk
    return;
  default:
    llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
  }
}
1556 
1557 void AArch64TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
1558                                        uint64_t Val) const {
1559   // TLSDESC Global-Dynamic relocation are in the form:
1560   //   adrp    x0, :tlsdesc:v             [R_AARCH64_TLSDESC_ADR_PAGE21]
1561   //   ldr     x1, [x0, #:tlsdesc_lo12:v  [R_AARCH64_TLSDESC_LD64_LO12_NC]
1562   //   add     x0, x0, :tlsdesc_los:v     [_AARCH64_TLSDESC_ADD_LO12_NC]
1563   //   .tlsdesccall                       [R_AARCH64_TLSDESC_CALL]
1564   //   blr     x1
1565   // And it can optimized to:
1566   //   adrp    x0, :gottprel:v
1567   //   ldr     x0, [x0, :gottprel_lo12:v]
1568   //   nop
1569   //   nop
1570 
1571   switch (Type) {
1572   case R_AARCH64_TLSDESC_ADD_LO12_NC:
1573   case R_AARCH64_TLSDESC_CALL:
1574     write32le(Loc, 0xd503201f); // nop
1575     break;
1576   case R_AARCH64_TLSDESC_ADR_PAGE21:
1577     write32le(Loc, 0x90000000); // adrp
1578     relocateOne(Loc, R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, Val);
1579     break;
1580   case R_AARCH64_TLSDESC_LD64_LO12_NC:
1581     write32le(Loc, 0xf9400000); // ldr
1582     relocateOne(Loc, R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, Val);
1583     break;
1584   default:
1585     llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
1586   }
1587 }
1588 
// Relax an initial-exec TLS access (adrp + ldr of the GOT-resident TP
// offset) into local-exec: materialize the TP offset directly with a
// movz/movk pair, preserving the destination register of each instruction.
void AArch64TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                       uint64_t Val) const {
  // The movz/movk pair can only encode a 32-bit offset.
  checkUInt<32>(Loc, Val, Type);

  if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
    // Generate MOVZ.
    uint32_t RegNo = read32le(Loc) & 0x1f; // keep the original Rd
    write32le(Loc, (0xd2a00000 | RegNo) | (((Val >> 16) & 0xffff) << 5));
    return;
  }
  if (Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) {
    // Generate MOVK.
    uint32_t RegNo = read32le(Loc) & 0x1f;
    write32le(Loc, (0xf2800000 | RegNo) | ((Val & 0xffff) << 5));
    return;
  }
  llvm_unreachable("invalid relocation for TLS IE to LE relaxation");
}
1607 
// AMDGPU uses 64-bit GOT entries filled with absolute addresses.
AMDGPUTargetInfo::AMDGPUTargetInfo() {
  RelativeRel = R_AMDGPU_REL64;
  GotRel = R_AMDGPU_ABS64;
  GotEntrySize = 8;
}
1613 
// Apply an AMDGPU relocation. All fields are little-endian; the *_HI
// variants store the upper 32 bits of the 64-bit value.
void AMDGPUTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  switch (Type) {
  case R_AMDGPU_ABS32:
  case R_AMDGPU_GOTPCREL:
  case R_AMDGPU_GOTPCREL32_LO:
  case R_AMDGPU_REL32:
  case R_AMDGPU_REL32_LO:
    write32le(Loc, Val);
    break;
  case R_AMDGPU_ABS64:
    write64le(Loc, Val);
    break;
  case R_AMDGPU_GOTPCREL32_HI:
  case R_AMDGPU_REL32_HI:
    write32le(Loc, Val >> 32);
    break;
  default:
    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
  }
}
1635 
1636 RelExpr AMDGPUTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
1637   switch (Type) {
1638   case R_AMDGPU_ABS32:
1639   case R_AMDGPU_ABS64:
1640     return R_ABS;
1641   case R_AMDGPU_REL32:
1642   case R_AMDGPU_REL32_LO:
1643   case R_AMDGPU_REL32_HI:
1644     return R_PC;
1645   case R_AMDGPU_GOTPCREL:
1646   case R_AMDGPU_GOTPCREL32_LO:
1647   case R_AMDGPU_GOTPCREL32_HI:
1648     return R_GOT_PC;
1649   default:
1650     error(toString(S.File) + ": unknown relocation type: " + toString(Type));
1651     return R_HINT;
1652   }
1653 }
1654 
ARMTargetInfo::ARMTargetInfo() {
  CopyRel = R_ARM_COPY;
  RelativeRel = R_ARM_RELATIVE;
  IRelativeRel = R_ARM_IRELATIVE;
  GotRel = R_ARM_GLOB_DAT;
  PltRel = R_ARM_JUMP_SLOT;
  TlsGotRel = R_ARM_TLS_TPOFF32;
  TlsModuleIndexRel = R_ARM_TLS_DTPMOD32;
  TlsOffsetRel = R_ARM_TLS_DTPOFF32;
  GotEntrySize = 4;
  GotPltEntrySize = 4;
  PltEntrySize = 16;
  PltHeaderSize = 20;
  // ARM uses Variant 1 TLS
  TcbSize = 8;
  // ARM branches may need thunks (presumably for ARM/Thumb interworking
  // and range extension — see Thunks.cpp).
  NeedsThunks = true;
}
1672 
// Map an ARM relocation type onto the linker's internal expression kind.
// Per-case comments give the ABI formula (S = symbol, A = addend, P = place,
// GOT_ORG = GOT origin) where it is not obvious.
RelExpr ARMTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
  switch (Type) {
  default:
    return R_ABS;
  case R_ARM_THM_JUMP11:
    return R_PC;
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_PREL31:
  case R_ARM_THM_JUMP19:
  case R_ARM_THM_JUMP24:
  case R_ARM_THM_CALL:
    // Branches/calls that may be redirected through the PLT.
    return R_PLT_PC;
  case R_ARM_GOTOFF32:
    // (S + A) - GOT_ORG
    return R_GOTREL;
  case R_ARM_GOT_BREL:
    // GOT(S) + A - GOT_ORG
    return R_GOT_OFF;
  case R_ARM_GOT_PREL:
  case R_ARM_TLS_IE32:
    // GOT(S) + A - P
    return R_GOT_PC;
  case R_ARM_TARGET1:
    return Config->Target1Rel ? R_PC : R_ABS;
  case R_ARM_TARGET2:
    if (Config->Target2 == Target2Policy::Rel)
      return R_PC;
    if (Config->Target2 == Target2Policy::Abs)
      return R_ABS;
    return R_GOT_PC;
  case R_ARM_TLS_GD32:
    return R_TLSGD_PC;
  case R_ARM_TLS_LDM32:
    return R_TLSLD_PC;
  case R_ARM_BASE_PREL:
    // B(S) + A - P
    // FIXME: currently B(S) assumed to be .got, this may not hold for all
    // platforms.
    return R_GOTONLY_PC;
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVT_PREL:
  case R_ARM_REL32:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVT_PREL:
    return R_PC;
  case R_ARM_NONE:
    return R_NONE;
  case R_ARM_TLS_LE32:
    return R_TLS;
  }
}
1727 
1728 bool ARMTargetInfo::isPicRel(uint32_t Type) const {
1729   return (Type == R_ARM_TARGET1 && !Config->Target1Rel) ||
1730          (Type == R_ARM_ABS32);
1731 }
1732 
1733 uint32_t ARMTargetInfo::getDynRel(uint32_t Type) const {
1734   if (Type == R_ARM_TARGET1 && !Config->Target1Rel)
1735     return R_ARM_ABS32;
1736   if (Type == R_ARM_ABS32)
1737     return Type;
1738   // Keep it going with a dummy value so that we can find more reloc errors.
1739   return R_ARM_ABS32;
1740 }
1741 
// Write the initial value of a .got.plt entry: it points at the PLT header
// so that the first call through the slot enters the lazy resolver.
void ARMTargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
  write32le(Buf, In<ELF32LE>::Plt->getVA());
}
1745 
// Write the initial value of an .igot.plt entry (used for ifuncs).
void ARMTargetInfo::writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const {
  // An ARM entry is the address of the ifunc resolver function.
  write32le(Buf, S.getVA());
}
1750 
// Write the 20-byte lazy-resolution PLT header: save lr, compute the
// address of .got.plt pc-relatively, and jump through .got.plt[2], which
// the dynamic loader fills in with its resolver stub.
void ARMTargetInfo::writePltHeader(uint8_t *Buf) const {
  const uint8_t PltData[] = {
      0x04, 0xe0, 0x2d, 0xe5, //     str lr, [sp,#-4]!
      0x04, 0xe0, 0x9f, 0xe5, //     ldr lr, L2
      0x0e, 0xe0, 0x8f, 0xe0, // L1: add lr, pc, lr
      0x08, 0xf0, 0xbe, 0xe5, //     ldr pc, [lr, #8]
      0x00, 0x00, 0x00, 0x00, // L2: .word   &(.got.plt) - L1 - 8
  };
  memcpy(Buf, PltData, sizeof(PltData));
  // Patch L2 (at offset 16) with the displacement from L1 to .got.plt.
  // The extra -8 accounts for the ARM pipeline: pc reads as L1 + 8.
  uint64_t GotPlt = In<ELF32LE>::GotPlt->getVA();
  uint64_t L1 = In<ELF32LE>::Plt->getVA() + 8;
  write32le(Buf + 16, GotPlt - L1 - 8);
}
1764 
// Add ARM ELF mapping symbols for the PLT header: "$a" marks the start of
// the ARM code, "$d" the literal data word at offset 16 (see writePltHeader).
void ARMTargetInfo::addPltHeaderSymbols(InputSectionBase *ISD) const {
  auto *IS = cast<InputSection>(ISD);
  addSyntheticLocal<ELF32LE>("$a", STT_NOTYPE, 0, 0, IS);
  addSyntheticLocal<ELF32LE>("$d", STT_NOTYPE, 16, 0, IS);
}
1770 
// Write one 16-byte PLT entry: load the pc-relative offset of the entry's
// .got.plt slot from the trailing literal word and jump through the slot.
void ARMTargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                             uint64_t PltEntryAddr, int32_t Index,
                             unsigned RelOff) const {
  // FIXME: Using simple code sequence with simple relocations.
  // There is a more optimal sequence but it requires support for the group
  // relocations. See ELF for the ARM Architecture Appendix A.3
  const uint8_t PltData[] = {
      0x04, 0xc0, 0x9f, 0xe5, //     ldr ip, L2
      0x0f, 0xc0, 0x8c, 0xe0, // L1: add ip, ip, pc
      0x00, 0xf0, 0x9c, 0xe5, //     ldr pc, [ip]
      0x00, 0x00, 0x00, 0x00, // L2: .word   &(.got.plt entry) - L1 - 8
  };
  memcpy(Buf, PltData, sizeof(PltData));
  // Patch L2 (at offset 12); pc reads as L1 + 8 due to the ARM pipeline.
  uint64_t L1 = PltEntryAddr + 4;
  write32le(Buf + 12, GotEntryAddr - L1 - 8);
}
1787 
// Add ARM ELF mapping symbols for one PLT entry: "$a" marks the ARM code at
// Off, "$d" the literal data word at Off + 12 (see writePlt's layout).
void ARMTargetInfo::addPltSymbols(InputSectionBase *ISD, uint64_t Off) const {
  auto *IS = cast<InputSection>(ISD);
  addSyntheticLocal<ELF32LE>("$a", STT_NOTYPE, Off, 0, IS);
  addSyntheticLocal<ELF32LE>("$d", STT_NOTYPE, Off + 12, 0, IS);
}
1793 
// Decide whether a branch relocation to S requires an interworking thunk.
// Bit 0 of a symbol's address distinguishes Thumb (set) from ARM (clear);
// only R_ARM_CALL/R_ARM_THM_CALL can change state directly (via BLX).
bool ARMTargetInfo::needsThunk(RelExpr Expr, uint32_t RelocType,
                               const InputFile *File,
                               const SymbolBody &S) const {
  // If S is an undefined weak symbol in an executable we don't need a Thunk.
  // In a DSO calls to undefined symbols, including weak ones get PLT entries
  // which may need a thunk.
  if (S.isUndefined() && !S.isLocal() && S.symbol()->isWeak() &&
      !Config->Shared)
    return false;
  // A state change from ARM to Thumb and vice versa must go through an
  // interworking thunk if the relocation type is not R_ARM_CALL or
  // R_ARM_THM_CALL.
  switch (RelocType) {
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_JUMP24:
    // Source is ARM, all PLT entries are ARM so no interworking required.
    // Otherwise we need to interwork if Symbol has bit 0 set (Thumb).
    if (Expr == R_PC && ((S.getVA() & 1) == 1))
      return true;
    break;
  case R_ARM_THM_JUMP19:
  case R_ARM_THM_JUMP24:
    // Source is Thumb, all PLT entries are ARM so interworking is required.
    // Otherwise we need to interwork if Symbol has bit 0 clear (ARM).
    if (Expr == R_PLT_PC || ((S.getVA() & 1) == 0))
      return true;
    break;
  }
  return false;
}
1825 
1826 void ARMTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
1827                                 uint64_t Val) const {
1828   switch (Type) {
1829   case R_ARM_ABS32:
1830   case R_ARM_BASE_PREL:
1831   case R_ARM_GLOB_DAT:
1832   case R_ARM_GOTOFF32:
1833   case R_ARM_GOT_BREL:
1834   case R_ARM_GOT_PREL:
1835   case R_ARM_REL32:
1836   case R_ARM_RELATIVE:
1837   case R_ARM_TARGET1:
1838   case R_ARM_TARGET2:
1839   case R_ARM_TLS_GD32:
1840   case R_ARM_TLS_IE32:
1841   case R_ARM_TLS_LDM32:
1842   case R_ARM_TLS_LDO32:
1843   case R_ARM_TLS_LE32:
1844   case R_ARM_TLS_TPOFF32:
1845     write32le(Loc, Val);
1846     break;
1847   case R_ARM_TLS_DTPMOD32:
1848     write32le(Loc, 1);
1849     break;
1850   case R_ARM_PREL31:
1851     checkInt<31>(Loc, Val, Type);
1852     write32le(Loc, (read32le(Loc) & 0x80000000) | (Val & ~0x80000000));
1853     break;
1854   case R_ARM_CALL:
1855     // R_ARM_CALL is used for BL and BLX instructions, depending on the
1856     // value of bit 0 of Val, we must select a BL or BLX instruction
1857     if (Val & 1) {
1858       // If bit 0 of Val is 1 the target is Thumb, we must select a BLX.
1859       // The BLX encoding is 0xfa:H:imm24 where Val = imm24:H:'1'
1860       checkInt<26>(Loc, Val, Type);
1861       write32le(Loc, 0xfa000000 |                    // opcode
1862                          ((Val & 2) << 23) |         // H
1863                          ((Val >> 2) & 0x00ffffff)); // imm24
1864       break;
1865     }
1866     if ((read32le(Loc) & 0xfe000000) == 0xfa000000)
1867       // BLX (always unconditional) instruction to an ARM Target, select an
1868       // unconditional BL.
1869       write32le(Loc, 0xeb000000 | (read32le(Loc) & 0x00ffffff));
1870   // fall through as BL encoding is shared with B
1871   case R_ARM_JUMP24:
1872   case R_ARM_PC24:
1873   case R_ARM_PLT32:
1874     checkInt<26>(Loc, Val, Type);
1875     write32le(Loc, (read32le(Loc) & ~0x00ffffff) | ((Val >> 2) & 0x00ffffff));
1876     break;
1877   case R_ARM_THM_JUMP11:
1878     checkInt<12>(Loc, Val, Type);
1879     write16le(Loc, (read32le(Loc) & 0xf800) | ((Val >> 1) & 0x07ff));
1880     break;
1881   case R_ARM_THM_JUMP19:
1882     // Encoding T3: Val = S:J2:J1:imm6:imm11:0
1883     checkInt<21>(Loc, Val, Type);
1884     write16le(Loc,
1885               (read16le(Loc) & 0xfbc0) |   // opcode cond
1886                   ((Val >> 10) & 0x0400) | // S
1887                   ((Val >> 12) & 0x003f)); // imm6
1888     write16le(Loc + 2,
1889               0x8000 |                    // opcode
1890                   ((Val >> 8) & 0x0800) | // J2
1891                   ((Val >> 5) & 0x2000) | // J1
1892                   ((Val >> 1) & 0x07ff)); // imm11
1893     break;
1894   case R_ARM_THM_CALL:
1895     // R_ARM_THM_CALL is used for BL and BLX instructions, depending on the
1896     // value of bit 0 of Val, we must select a BL or BLX instruction
1897     if ((Val & 1) == 0) {
1898       // Ensure BLX destination is 4-byte aligned. As BLX instruction may
1899       // only be two byte aligned. This must be done before overflow check
1900       Val = alignTo(Val, 4);
1901     }
1902     // Bit 12 is 0 for BLX, 1 for BL
1903     write16le(Loc + 2, (read16le(Loc + 2) & ~0x1000) | (Val & 1) << 12);
1904   // Fall through as rest of encoding is the same as B.W
1905   case R_ARM_THM_JUMP24:
1906     // Encoding B  T4, BL T1, BLX T2: Val = S:I1:I2:imm10:imm11:0
1907     // FIXME: Use of I1 and I2 require v6T2ops
1908     checkInt<25>(Loc, Val, Type);
1909     write16le(Loc,
1910               0xf000 |                     // opcode
1911                   ((Val >> 14) & 0x0400) | // S
1912                   ((Val >> 12) & 0x03ff)); // imm10
1913     write16le(Loc + 2,
1914               (read16le(Loc + 2) & 0xd000) |                  // opcode
1915                   (((~(Val >> 10)) ^ (Val >> 11)) & 0x2000) | // J1
1916                   (((~(Val >> 11)) ^ (Val >> 13)) & 0x0800) | // J2
1917                   ((Val >> 1) & 0x07ff));                     // imm11
1918     break;
1919   case R_ARM_MOVW_ABS_NC:
1920   case R_ARM_MOVW_PREL_NC:
1921     write32le(Loc, (read32le(Loc) & ~0x000f0fff) | ((Val & 0xf000) << 4) |
1922                        (Val & 0x0fff));
1923     break;
1924   case R_ARM_MOVT_ABS:
1925   case R_ARM_MOVT_PREL:
1926     checkInt<32>(Loc, Val, Type);
1927     write32le(Loc, (read32le(Loc) & ~0x000f0fff) |
1928                        (((Val >> 16) & 0xf000) << 4) | ((Val >> 16) & 0xfff));
1929     break;
1930   case R_ARM_THM_MOVT_ABS:
1931   case R_ARM_THM_MOVT_PREL:
1932     // Encoding T1: A = imm4:i:imm3:imm8
1933     checkInt<32>(Loc, Val, Type);
1934     write16le(Loc,
1935               0xf2c0 |                     // opcode
1936                   ((Val >> 17) & 0x0400) | // i
1937                   ((Val >> 28) & 0x000f)); // imm4
1938     write16le(Loc + 2,
1939               (read16le(Loc + 2) & 0x8f00) | // opcode
1940                   ((Val >> 12) & 0x7000) |   // imm3
1941                   ((Val >> 16) & 0x00ff));   // imm8
1942     break;
1943   case R_ARM_THM_MOVW_ABS_NC:
1944   case R_ARM_THM_MOVW_PREL_NC:
1945     // Encoding T3: A = imm4:i:imm3:imm8
1946     write16le(Loc,
1947               0xf240 |                     // opcode
1948                   ((Val >> 1) & 0x0400) |  // i
1949                   ((Val >> 12) & 0x000f)); // imm4
1950     write16le(Loc + 2,
1951               (read16le(Loc + 2) & 0x8f00) | // opcode
1952                   ((Val << 4) & 0x7000) |    // imm3
1953                   (Val & 0x00ff));           // imm8
1954     break;
1955   default:
1956     error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
1957   }
1958 }
1959 
// Read the addend implicitly stored in the relocated field (REL-style
// relocations). The decoding mirrors the encodings in relocateOne above.
int64_t ARMTargetInfo::getImplicitAddend(const uint8_t *Buf,
                                         uint32_t Type) const {
  switch (Type) {
  default:
    return 0;
  case R_ARM_ABS32:
  case R_ARM_BASE_PREL:
  case R_ARM_GOTOFF32:
  case R_ARM_GOT_BREL:
  case R_ARM_GOT_PREL:
  case R_ARM_REL32:
  case R_ARM_TARGET1:
  case R_ARM_TARGET2:
  case R_ARM_TLS_GD32:
  case R_ARM_TLS_LDM32:
  case R_ARM_TLS_LDO32:
  case R_ARM_TLS_IE32:
  case R_ARM_TLS_LE32:
    // Plain 32-bit data words.
    return SignExtend64<32>(read32le(Buf));
  case R_ARM_PREL31:
    return SignExtend64<31>(read32le(Buf));
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
    // imm24 field scaled by 4.
    return SignExtend64<26>(read32le(Buf) << 2);
  case R_ARM_THM_JUMP11:
    // imm11 field scaled by 2.
    return SignExtend64<12>(read16le(Buf) << 1);
  case R_ARM_THM_JUMP19: {
    // Encoding T3: A = S:J2:J1:imm10:imm6:0
    uint16_t Hi = read16le(Buf);
    uint16_t Lo = read16le(Buf + 2);
    return SignExtend64<20>(((Hi & 0x0400) << 10) | // S
                            ((Lo & 0x0800) << 8) |  // J2
                            ((Lo & 0x2000) << 5) |  // J1
                            ((Hi & 0x003f) << 12) | // imm6
                            ((Lo & 0x07ff) << 1));  // imm11:0
  }
  case R_ARM_THM_CALL:
  case R_ARM_THM_JUMP24: {
    // Encoding B T4, BL T1, BLX T2: A = S:I1:I2:imm10:imm11:0
    // I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S)
    // FIXME: I1 and I2 require v6T2ops
    uint16_t Hi = read16le(Buf);
    uint16_t Lo = read16le(Buf + 2);
    return SignExtend64<24>(((Hi & 0x0400) << 14) |                    // S
                            (~((Lo ^ (Hi << 3)) << 10) & 0x00800000) | // I1
                            (~((Lo ^ (Hi << 1)) << 11) & 0x00400000) | // I2
                            ((Hi & 0x003ff) << 12) |                   // imm10
                            ((Lo & 0x007ff) << 1)); // imm11:0
  }
  // ELF for the ARM Architecture 4.6.1.1 the implicit addend for MOVW and
  // MOVT is in the range -32768 <= A < 32768
  case R_ARM_MOVW_ABS_NC:
  case R_ARM_MOVT_ABS:
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVT_PREL: {
    // ARM encoding: A = imm4:imm12.
    uint64_t Val = read32le(Buf) & 0x000f0fff;
    return SignExtend64<16>(((Val & 0x000f0000) >> 4) | (Val & 0x00fff));
  }
  case R_ARM_THM_MOVW_ABS_NC:
  case R_ARM_THM_MOVT_ABS:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVT_PREL: {
    // Encoding T3: A = imm4:i:imm3:imm8
    uint16_t Hi = read16le(Buf);
    uint16_t Lo = read16le(Buf + 2);
    return SignExtend64<16>(((Hi & 0x000f) << 12) | // imm4
                            ((Hi & 0x0400) << 1) |  // i
                            ((Lo & 0x7000) >> 4) |  // imm3
                            (Lo & 0x00ff));         // imm8
  }
  }
}
2034 
2035 bool ARMTargetInfo::isTlsLocalDynamicRel(uint32_t Type) const {
2036   return Type == R_ARM_TLS_LDO32 || Type == R_ARM_TLS_LDM32;
2037 }
2038 
// Return true for relocations that belong to the general-dynamic TLS model.
bool ARMTargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
  return Type == R_ARM_TLS_GD32;
}
2042 
// Return true for relocations that belong to the initial-exec TLS model.
bool ARMTargetInfo::isTlsInitialExecRel(uint32_t Type) const {
  return Type == R_ARM_TLS_IE32;
}
2046 
// Configure target parameters for MIPS (32/64-bit, either endianness).
template <class ELFT> MipsTargetInfo<ELFT>::MipsTargetInfo() {
  // .got.plt starts with two reserved entries — presumably for the dynamic
  // resolver and its argument; TODO confirm against the MIPS ABI.
  GotPltHeaderEntriesNum = 2;
  DefaultMaxPageSize = 65536;
  // GOT entries are pointer-sized; PLT layout matches writePltHeader /
  // writePlt below (32-byte header, 16-byte entries).
  GotEntrySize = sizeof(typename ELFT::uint);
  GotPltEntrySize = sizeof(typename ELFT::uint);
  PltEntrySize = 16;
  PltHeaderSize = 32;
  CopyRel = R_MIPS_COPY;
  PltRel = R_MIPS_JUMP_SLOT;
  // LA25 stubs may be needed for calls from non-PIC to PIC code;
  // see needsThunk below.
  NeedsThunks = true;
  // Pick dynamic relocation types matching the word size. On 64-bit targets
  // R_MIPS_REL32 is paired with R_MIPS_64 in one packed relocation record.
  if (ELFT::Is64Bits) {
    RelativeRel = (R_MIPS_64 << 8) | R_MIPS_REL32;
    TlsGotRel = R_MIPS_TLS_TPREL64;
    TlsModuleIndexRel = R_MIPS_TLS_DTPMOD64;
    TlsOffsetRel = R_MIPS_TLS_DTPREL64;
  } else {
    RelativeRel = R_MIPS_REL32;
    TlsGotRel = R_MIPS_TLS_TPREL32;
    TlsModuleIndexRel = R_MIPS_TLS_DTPMOD32;
    TlsOffsetRel = R_MIPS_TLS_DTPREL32;
  }
}
2069 
// Map a MIPS relocation type onto the linker's internal expression kind.
template <class ELFT>
RelExpr MipsTargetInfo<ELFT>::getRelExpr(uint32_t Type,
                                         const SymbolBody &S) const {
  // N64 and N32 pack up to three relocation types into one record; only the
  // primary (lowest byte) type matters here.
  // See comment in the calculateMipsRelChain.
  if (ELFT::Is64Bits || Config->MipsN32Abi)
    Type &= 0xff;
  switch (Type) {
  default:
    return R_ABS;
  case R_MIPS_JALR:
    // An optimization hint, not a real relocation.
    return R_HINT;
  case R_MIPS_GPREL16:
  case R_MIPS_GPREL32:
    // Offsets from the 'gp' register.
    return R_MIPS_GOTREL;
  case R_MIPS_26:
    // Jump target; may be redirected through a PLT entry.
    return R_PLT;
  case R_MIPS_HI16:
  case R_MIPS_LO16:
    // R_MIPS_HI16/R_MIPS_LO16 relocations against _gp_disp calculate
    // offset between start of function and 'gp' value which by default
    // equal to the start of .got section. In that case we consider these
    // relocations as relative.
    if (&S == ElfSym::MipsGpDisp)
      return R_MIPS_GOT_GP_PC;
    if (&S == ElfSym::MipsLocalGp)
      return R_MIPS_GOT_GP;
    // fallthrough
  case R_MIPS_GOT_OFST:
    return R_ABS;
  case R_MIPS_PC32:
  case R_MIPS_PC16:
  case R_MIPS_PC19_S2:
  case R_MIPS_PC21_S2:
  case R_MIPS_PC26_S2:
  case R_MIPS_PCHI16:
  case R_MIPS_PCLO16:
    return R_PC;
  case R_MIPS_GOT16:
    // Local symbols are addressed via the GOT page-address entry.
    if (S.isLocal())
      return R_MIPS_GOT_LOCAL_PAGE;
  // fallthrough
  case R_MIPS_CALL16:
  case R_MIPS_GOT_DISP:
  case R_MIPS_TLS_GOTTPREL:
    // 16-bit offsets of GOT entries.
    return R_MIPS_GOT_OFF;
  case R_MIPS_CALL_HI16:
  case R_MIPS_CALL_LO16:
  case R_MIPS_GOT_HI16:
  case R_MIPS_GOT_LO16:
    // Hi/lo halves of 32-bit GOT-entry offsets.
    return R_MIPS_GOT_OFF32;
  case R_MIPS_GOT_PAGE:
    return R_MIPS_GOT_LOCAL_PAGE;
  case R_MIPS_TLS_GD:
    return R_MIPS_TLSGD;
  case R_MIPS_TLS_LDM:
    return R_MIPS_TLSLD;
  }
}
2128 
2129 template <class ELFT> bool MipsTargetInfo<ELFT>::isPicRel(uint32_t Type) const {
2130   return Type == R_MIPS_32 || Type == R_MIPS_64;
2131 }
2132 
// Return the dynamic relocation type used to represent Type at run time.
// MIPS expresses all dynamic word relocations with R_MIPS_REL32 (packed
// with R_MIPS_64 on 64-bit targets, see the constructor); Type is ignored.
template <class ELFT>
uint32_t MipsTargetInfo<ELFT>::getDynRel(uint32_t Type) const {
  return RelativeRel;
}
2137 
// Return true for relocations that belong to the local-dynamic TLS model.
template <class ELFT>
bool MipsTargetInfo<ELFT>::isTlsLocalDynamicRel(uint32_t Type) const {
  return Type == R_MIPS_TLS_LDM;
}
2142 
// Return true for relocations that belong to the general-dynamic TLS model.
template <class ELFT>
bool MipsTargetInfo<ELFT>::isTlsGlobalDynamicRel(uint32_t Type) const {
  return Type == R_MIPS_TLS_GD;
}
2147 
// Write the initial value of a .got.plt entry: it points at the PLT header
// so that the first call through the slot enters the lazy resolver.
template <class ELFT>
void MipsTargetInfo<ELFT>::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
  write32<ELFT::TargetEndianness>(Buf, In<ELFT>::Plt->getVA());
}
2152 
2153 template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
2154 static int64_t getPcRelocAddend(const uint8_t *Loc) {
2155   uint32_t Instr = read32<E>(Loc);
2156   uint32_t Mask = 0xffffffff >> (32 - BSIZE);
2157   return SignExtend64<BSIZE + SHIFT>((Instr & Mask) << SHIFT);
2158 }
2159 
// Patch a PC-relative MIPS relocation: the instruction's low BSIZE bits
// hold the value V shifted right by SHIFT.
template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
static void applyMipsPcReloc(uint8_t *Loc, uint32_t Type, uint64_t V) {
  uint32_t Mask = 0xffffffff >> (32 - BSIZE);
  uint32_t Instr = read32<E>(Loc);
  // A shifted immediate can only represent 2^SHIFT-aligned values.
  if (SHIFT > 0)
    checkAlignment<(1 << SHIFT)>(Loc, V, Type);
  checkInt<BSIZE + SHIFT>(Loc, V, Type);
  write32<E>(Loc, (Instr & ~Mask) | ((V >> SHIFT) & Mask));
}
2169 
2170 template <endianness E> static void writeMipsHi16(uint8_t *Loc, uint64_t V) {
2171   uint32_t Instr = read32<E>(Loc);
2172   uint16_t Res = ((V + 0x8000) >> 16) & 0xffff;
2173   write32<E>(Loc, (Instr & 0xffff0000) | Res);
2174 }
2175 
2176 template <endianness E> static void writeMipsHigher(uint8_t *Loc, uint64_t V) {
2177   uint32_t Instr = read32<E>(Loc);
2178   uint16_t Res = ((V + 0x80008000) >> 32) & 0xffff;
2179   write32<E>(Loc, (Instr & 0xffff0000) | Res);
2180 }
2181 
2182 template <endianness E> static void writeMipsHighest(uint8_t *Loc, uint64_t V) {
2183   uint32_t Instr = read32<E>(Loc);
2184   uint16_t Res = ((V + 0x800080008000) >> 48) & 0xffff;
2185   write32<E>(Loc, (Instr & 0xffff0000) | Res);
2186 }
2187 
2188 template <endianness E> static void writeMipsLo16(uint8_t *Loc, uint64_t V) {
2189   uint32_t Instr = read32<E>(Loc);
2190   write32<E>(Loc, (Instr & 0xffff0000) | (V & 0xffff));
2191 }
2192 
// Return true if the output targets a MIPS Release 6 ISA, judging by the
// ELF flags of the first input object. Used to pick R6-specific instruction
// encodings (see writePlt).
template <class ELFT> static bool isMipsR6() {
  const auto &FirstObj = cast<ELFFileBase<ELFT>>(*Config->FirstElf);
  uint32_t Arch = FirstObj.getObj().getHeader()->e_flags & EF_MIPS_ARCH;
  return Arch == EF_MIPS_ARCH_32R6 || Arch == EF_MIPS_ARCH_64R6;
}
2198 
// Write the 32-byte lazy-resolution PLT header. It loads the first word of
// .got.plt into $25, computes the caller's PLT index in $24, and jumps to
// $25. N32 avoids clobbering $28 (gp) by using $14 as scratch instead.
template <class ELFT>
void MipsTargetInfo<ELFT>::writePltHeader(uint8_t *Buf) const {
  const endianness E = ELFT::TargetEndianness;
  if (Config->MipsN32Abi) {
    write32<E>(Buf, 0x3c0e0000);      // lui   $14, %hi(&GOTPLT[0])
    write32<E>(Buf + 4, 0x8dd90000);  // lw    $25, %lo(&GOTPLT[0])($14)
    write32<E>(Buf + 8, 0x25ce0000);  // addiu $14, $14, %lo(&GOTPLT[0])
    write32<E>(Buf + 12, 0x030ec023); // subu  $24, $24, $14
  } else {
    write32<E>(Buf, 0x3c1c0000);      // lui   $28, %hi(&GOTPLT[0])
    write32<E>(Buf + 4, 0x8f990000);  // lw    $25, %lo(&GOTPLT[0])($28)
    write32<E>(Buf + 8, 0x279c0000);  // addiu $28, $28, %lo(&GOTPLT[0])
    write32<E>(Buf + 12, 0x031cc023); // subu  $24, $24, $28
  }
  write32<E>(Buf + 16, 0x03e07825); // move  $15, $31
  write32<E>(Buf + 20, 0x0018c082); // srl   $24, $24, 2
  write32<E>(Buf + 24, 0x0320f809); // jalr  $25
  write32<E>(Buf + 28, 0x2718fffe); // subu  $24, $24, 2 (addiu $24, $24, -2)
  // Patch the %hi/%lo fields with the address of .got.plt.
  uint64_t Got = In<ELFT>::GotPlt->getVA();
  writeMipsHi16<E>(Buf, Got);
  writeMipsLo16<E>(Buf + 4, Got);
  writeMipsLo16<E>(Buf + 8, Got);
}
2222 
// Write one 16-byte PLT entry: load the symbol's .got.plt slot into $25 and
// jump to it, leaving the slot address in $24 for the lazy resolver.
template <class ELFT>
void MipsTargetInfo<ELFT>::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                    uint64_t PltEntryAddr, int32_t Index,
                                    unsigned RelOff) const {
  const endianness E = ELFT::TargetEndianness;
  write32<E>(Buf, 0x3c0f0000);     // lui   $15, %hi(.got.plt entry)
  write32<E>(Buf + 4, 0x8df90000); // l[wd] $25, %lo(.got.plt entry)($15)
                                   // jr    $25
  // R6 uses a different encoding for jr.
  write32<E>(Buf + 8, isMipsR6<ELFT>() ? 0x03200009 : 0x03200008);
  write32<E>(Buf + 12, 0x25f80000); // addiu $24, $15, %lo(.got.plt entry)
  // Patch the %hi/%lo fields with the address of the .got.plt entry.
  writeMipsHi16<E>(Buf, GotEntryAddr);
  writeMipsLo16<E>(Buf + 4, GotEntryAddr);
  writeMipsLo16<E>(Buf + 12, GotEntryAddr);
}
2237 
// Decide whether a jump needs to go through an LA25 stub that loads the
// callee's address into $25 ($t9), as MIPS PIC calling convention requires.
template <class ELFT>
bool MipsTargetInfo<ELFT>::needsThunk(RelExpr Expr, uint32_t Type,
                                      const InputFile *File,
                                      const SymbolBody &S) const {
  // Any MIPS PIC code function is invoked with its address in register $t9.
  // So if we have a branch instruction from non-PIC code to the PIC one
  // we cannot make the jump directly and need to create a small stubs
  // to save the target function address.
  // See page 3-38 ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
  if (Type != R_MIPS_26)
    return false;
  auto *F = dyn_cast_or_null<ELFFileBase<ELFT>>(File);
  if (!F)
    return false;
  // If current file has PIC code, LA25 stub is not required.
  if (F->getObj().getHeader()->e_flags & EF_MIPS_PIC)
    return false;
  auto *D = dyn_cast<DefinedRegular>(&S);
  // LA25 is required if target file has PIC code
  // or target symbol is a PIC symbol.
  return D && D->isMipsPIC<ELFT>();
}
2260 
// Read the addend implicitly stored in the relocated field (REL-style
// relocations). The decoding mirrors the encodings in relocateOne below.
template <class ELFT>
int64_t MipsTargetInfo<ELFT>::getImplicitAddend(const uint8_t *Buf,
                                                uint32_t Type) const {
  const endianness E = ELFT::TargetEndianness;
  switch (Type) {
  default:
    return 0;
  case R_MIPS_32:
  case R_MIPS_GPREL32:
  case R_MIPS_TLS_DTPREL32:
  case R_MIPS_TLS_TPREL32:
    // Plain 32-bit data words.
    return SignExtend64<32>(read32<E>(Buf));
  case R_MIPS_26:
    // FIXME (simon): If the relocation target symbol is not a PLT entry
    // we should use another expression for calculation:
    // ((A << 2) | (P & 0xf0000000)) >> 2
    return SignExtend64<28>((read32<E>(Buf) & 0x3ffffff) << 2);
  case R_MIPS_GPREL16:
  case R_MIPS_LO16:
  case R_MIPS_PCLO16:
  case R_MIPS_TLS_DTPREL_HI16:
  case R_MIPS_TLS_DTPREL_LO16:
  case R_MIPS_TLS_TPREL_HI16:
  case R_MIPS_TLS_TPREL_LO16:
    // 16-bit immediate in the low half of the instruction.
    return SignExtend64<16>(read32<E>(Buf));
  case R_MIPS_PC16:
    return getPcRelocAddend<E, 16, 2>(Buf);
  case R_MIPS_PC19_S2:
    return getPcRelocAddend<E, 19, 2>(Buf);
  case R_MIPS_PC21_S2:
    return getPcRelocAddend<E, 21, 2>(Buf);
  case R_MIPS_PC26_S2:
    return getPcRelocAddend<E, 26, 2>(Buf);
  case R_MIPS_PC32:
    return getPcRelocAddend<E, 32, 0>(Buf);
  }
}
2298 
2299 static std::pair<uint32_t, uint64_t>
2300 calculateMipsRelChain(uint8_t *Loc, uint32_t Type, uint64_t Val) {
2301   // MIPS N64 ABI packs multiple relocations into the single relocation
2302   // record. In general, all up to three relocations can have arbitrary
2303   // types. In fact, Clang and GCC uses only a few combinations. For now,
2304   // we support two of them. That is allow to pass at least all LLVM
2305   // test suite cases.
2306   // <any relocation> / R_MIPS_SUB / R_MIPS_HI16 | R_MIPS_LO16
2307   // <any relocation> / R_MIPS_64 / R_MIPS_NONE
2308   // The first relocation is a 'real' relocation which is calculated
2309   // using the corresponding symbol's value. The second and the third
2310   // relocations used to modify result of the first one: extend it to
2311   // 64-bit, extract high or low part etc. For details, see part 2.9 Relocation
2312   // at the https://dmz-portal.mips.com/mw/images/8/82/007-4658-001.pdf
2313   uint32_t Type2 = (Type >> 8) & 0xff;
2314   uint32_t Type3 = (Type >> 16) & 0xff;
2315   if (Type2 == R_MIPS_NONE && Type3 == R_MIPS_NONE)
2316     return std::make_pair(Type, Val);
2317   if (Type2 == R_MIPS_64 && Type3 == R_MIPS_NONE)
2318     return std::make_pair(Type2, Val);
2319   if (Type2 == R_MIPS_SUB && (Type3 == R_MIPS_HI16 || Type3 == R_MIPS_LO16))
2320     return std::make_pair(Type3, -Val);
2321   error(getErrorLocation(Loc) + "unsupported relocations combination " +
2322         Twine(Type));
2323   return std::make_pair(Type & 0xff, Val);
2324 }
2325 
// Resolve a MIPS relocation: adjust TLS offsets, unpack N64 relocation
// chains, then patch the instruction or data at Loc according to Type.
template <class ELFT>
void MipsTargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint32_t Type,
                                       uint64_t Val) const {
  const endianness E = ELFT::TargetEndianness;
  // Thread pointer and DRP offsets from the start of TLS data area.
  // https://www.linux-mips.org/wiki/NPTL
  if (Type == R_MIPS_TLS_DTPREL_HI16 || Type == R_MIPS_TLS_DTPREL_LO16 ||
      Type == R_MIPS_TLS_DTPREL32 || Type == R_MIPS_TLS_DTPREL64)
    Val -= 0x8000;
  else if (Type == R_MIPS_TLS_TPREL_HI16 || Type == R_MIPS_TLS_TPREL_LO16 ||
           Type == R_MIPS_TLS_TPREL32 || Type == R_MIPS_TLS_TPREL64)
    Val -= 0x7000;
  // N64/N32 pack up to three relocation types into one record; reduce them
  // to a single effective type and value. See calculateMipsRelChain.
  if (ELFT::Is64Bits || Config->MipsN32Abi)
    std::tie(Type, Val) = calculateMipsRelChain(Loc, Type, Val);
  switch (Type) {
  case R_MIPS_32:
  case R_MIPS_GPREL32:
  case R_MIPS_TLS_DTPREL32:
  case R_MIPS_TLS_TPREL32:
    write32<E>(Loc, Val);
    break;
  case R_MIPS_64:
  case R_MIPS_TLS_DTPREL64:
  case R_MIPS_TLS_TPREL64:
    write64<E>(Loc, Val);
    break;
  case R_MIPS_26:
    // 26-bit jump target, stored shifted right by 2.
    write32<E>(Loc, (read32<E>(Loc) & ~0x3ffffff) | ((Val >> 2) & 0x3ffffff));
    break;
  case R_MIPS_GOT16:
    // The R_MIPS_GOT16 relocation's value in "relocatable" linking mode
    // is updated addend (not a GOT index). In that case write high 16 bits
    // to store a correct addend value.
    if (Config->Relocatable)
      writeMipsHi16<E>(Loc, Val);
    else {
      checkInt<16>(Loc, Val, Type);
      writeMipsLo16<E>(Loc, Val);
    }
    break;
  case R_MIPS_GOT_DISP:
  case R_MIPS_GOT_PAGE:
  case R_MIPS_GPREL16:
  case R_MIPS_TLS_GD:
  case R_MIPS_TLS_LDM:
    // These must fit in 16 bits; then store the low half like %lo.
    checkInt<16>(Loc, Val, Type);
  // fallthrough
  case R_MIPS_CALL16:
  case R_MIPS_CALL_LO16:
  case R_MIPS_GOT_LO16:
  case R_MIPS_GOT_OFST:
  case R_MIPS_LO16:
  case R_MIPS_PCLO16:
  case R_MIPS_TLS_DTPREL_LO16:
  case R_MIPS_TLS_GOTTPREL:
  case R_MIPS_TLS_TPREL_LO16:
    writeMipsLo16<E>(Loc, Val);
    break;
  case R_MIPS_CALL_HI16:
  case R_MIPS_GOT_HI16:
  case R_MIPS_HI16:
  case R_MIPS_PCHI16:
  case R_MIPS_TLS_DTPREL_HI16:
  case R_MIPS_TLS_TPREL_HI16:
    writeMipsHi16<E>(Loc, Val);
    break;
  case R_MIPS_HIGHER:
    writeMipsHigher<E>(Loc, Val);
    break;
  case R_MIPS_HIGHEST:
    writeMipsHighest<E>(Loc, Val);
    break;
  case R_MIPS_JALR:
    // Ignore this optimization relocation for now
    break;
  case R_MIPS_PC16:
    applyMipsPcReloc<E, 16, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC19_S2:
    applyMipsPcReloc<E, 19, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC21_S2:
    applyMipsPcReloc<E, 21, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC26_S2:
    applyMipsPcReloc<E, 26, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC32:
    applyMipsPcReloc<E, 32, 0>(Loc, Type, Val);
    break;
  default:
    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
  }
}
2420 
2421 template <class ELFT>
2422 bool MipsTargetInfo<ELFT>::usesOnlyLowPageBits(uint32_t Type) const {
2423   return Type == R_MIPS_LO16 || Type == R_MIPS_GOT_OFST;
2424 }
2425 }
2426 }
2427