1 //===- Target.cpp ---------------------------------------------------------===//
2 //
3 //                             The LLVM Linker
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Machine-specific things, such as applying relocations, creation of
11 // GOT or PLT entries, etc., are handled in this file.
12 //
// Refer to the ELF spec for the single-letter variables, S, A or P, used
// in this file.
//
// Some functions defined in this file have "relaxTls" as part of their names.
// They do peephole optimization for TLS variables by rewriting instructions.
// They are not part of the ABI but an optional optimization, so you can skip
// them if you are not interested in how TLS variables are optimized.
20 // See the following paper for the details.
21 //
22 //   Ulrich Drepper, ELF Handling For Thread-Local Storage
23 //   http://www.akkadia.org/drepper/tls.pdf
24 //
25 //===----------------------------------------------------------------------===//
26 
27 #include "Target.h"
28 #include "Error.h"
29 #include "InputFiles.h"
30 #include "Memory.h"
31 #include "OutputSections.h"
32 #include "SymbolTable.h"
33 #include "Symbols.h"
34 #include "SyntheticSections.h"
35 #include "Thunks.h"
36 #include "Writer.h"
37 #include "llvm/ADT/ArrayRef.h"
38 #include "llvm/Object/ELF.h"
39 #include "llvm/Support/ELF.h"
40 #include "llvm/Support/Endian.h"
41 
42 using namespace llvm;
43 using namespace llvm::object;
44 using namespace llvm::support::endian;
45 using namespace llvm::ELF;
46 
47 std::string lld::toString(uint32_t Type) {
48   StringRef S = getELFRelocationTypeName(elf::Config->EMachine, Type);
49   if (S == "Unknown")
50     return ("Unknown (" + Twine(Type) + ")").str();
51   return S;
52 }
53 
54 namespace lld {
55 namespace elf {
56 
57 TargetInfo *Target;
58 
59 static void or32le(uint8_t *P, int32_t V) { write32le(P, read32le(P) | V); }
60 static void or32be(uint8_t *P, int32_t V) { write32be(P, read32be(P) | V); }
61 
62 template <class ELFT> static std::string getErrorLoc(uint8_t *Loc) {
63   for (InputSectionData *D : Symtab<ELFT>::X->Sections) {
64     auto *IS = dyn_cast_or_null<InputSection<ELFT>>(D);
65     if (!IS || !IS->OutSec)
66       continue;
67 
68     uint8_t *ISLoc = cast<OutputSection<ELFT>>(IS->OutSec)->Loc + IS->OutSecOff;
69     if (ISLoc <= Loc && Loc < ISLoc + IS->getSize())
70       return IS->getLocation(Loc - ISLoc) + ": ";
71   }
72   return "";
73 }
74 
75 static std::string getErrorLocation(uint8_t *Loc) {
76   switch (Config->EKind) {
77   case ELF32LEKind:
78     return getErrorLoc<ELF32LE>(Loc);
79   case ELF32BEKind:
80     return getErrorLoc<ELF32BE>(Loc);
81   case ELF64LEKind:
82     return getErrorLoc<ELF64LE>(Loc);
83   case ELF64BEKind:
84     return getErrorLoc<ELF64BE>(Loc);
85   default:
86     llvm_unreachable("unknown ELF type");
87   }
88 }
89 
90 template <unsigned N>
91 static void checkInt(uint8_t *Loc, int64_t V, uint32_t Type) {
92   if (!isInt<N>(V))
93     error(getErrorLocation(Loc) + "relocation " + toString(Type) +
94           " out of range");
95 }
96 
97 template <unsigned N>
98 static void checkUInt(uint8_t *Loc, uint64_t V, uint32_t Type) {
99   if (!isUInt<N>(V))
100     error(getErrorLocation(Loc) + "relocation " + toString(Type) +
101           " out of range");
102 }
103 
104 template <unsigned N>
105 static void checkIntUInt(uint8_t *Loc, uint64_t V, uint32_t Type) {
106   if (!isInt<N>(V) && !isUInt<N>(V))
107     error(getErrorLocation(Loc) + "relocation " + toString(Type) +
108           " out of range");
109 }
110 
111 template <unsigned N>
112 static void checkAlignment(uint8_t *Loc, uint64_t V, uint32_t Type) {
113   if ((V & (N - 1)) != 0)
114     error(getErrorLocation(Loc) + "improper alignment for relocation " +
115           toString(Type));
116 }
117 
118 namespace {
119 class X86TargetInfo final : public TargetInfo {
120 public:
121   X86TargetInfo();
122   RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
123   int64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
124   void writeGotPltHeader(uint8_t *Buf) const override;
125   uint32_t getDynRel(uint32_t Type) const override;
126   bool isTlsLocalDynamicRel(uint32_t Type) const override;
127   bool isTlsGlobalDynamicRel(uint32_t Type) const override;
128   bool isTlsInitialExecRel(uint32_t Type) const override;
129   void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
130   void writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const override;
131   void writePltHeader(uint8_t *Buf) const override;
132   void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
133                 int32_t Index, unsigned RelOff) const override;
134   void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
135 
136   RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
137                           RelExpr Expr) const override;
138   void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
139   void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
140   void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
141   void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
142 };
143 
144 template <class ELFT> class X86_64TargetInfo final : public TargetInfo {
145 public:
146   X86_64TargetInfo();
147   RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
148   bool isPicRel(uint32_t Type) const override;
149   bool isTlsLocalDynamicRel(uint32_t Type) const override;
150   bool isTlsGlobalDynamicRel(uint32_t Type) const override;
151   bool isTlsInitialExecRel(uint32_t Type) const override;
152   void writeGotPltHeader(uint8_t *Buf) const override;
153   void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
154   void writePltHeader(uint8_t *Buf) const override;
155   void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
156                 int32_t Index, unsigned RelOff) const override;
157   void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
158 
159   RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
160                           RelExpr Expr) const override;
161   void relaxGot(uint8_t *Loc, uint64_t Val) const override;
162   void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
163   void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
164   void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
165   void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
166 
167 private:
168   void relaxGotNoPic(uint8_t *Loc, uint64_t Val, uint8_t Op,
169                      uint8_t ModRm) const;
170 };
171 
172 class PPCTargetInfo final : public TargetInfo {
173 public:
174   PPCTargetInfo();
175   void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
176   RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
177 };
178 
179 class PPC64TargetInfo final : public TargetInfo {
180 public:
181   PPC64TargetInfo();
182   RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
183   void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
184                 int32_t Index, unsigned RelOff) const override;
185   void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
186 };
187 
188 class AArch64TargetInfo final : public TargetInfo {
189 public:
190   AArch64TargetInfo();
191   RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
192   bool isPicRel(uint32_t Type) const override;
193   bool isTlsInitialExecRel(uint32_t Type) const override;
194   void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
195   void writePltHeader(uint8_t *Buf) const override;
196   void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
197                 int32_t Index, unsigned RelOff) const override;
198   bool usesOnlyLowPageBits(uint32_t Type) const override;
199   void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
200   RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
201                           RelExpr Expr) const override;
202   void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
203   void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
204   void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
205 };
206 
207 class AMDGPUTargetInfo final : public TargetInfo {
208 public:
209   AMDGPUTargetInfo();
210   void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
211   RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
212 };
213 
214 class ARMTargetInfo final : public TargetInfo {
215 public:
216   ARMTargetInfo();
217   RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
218   bool isPicRel(uint32_t Type) const override;
219   uint32_t getDynRel(uint32_t Type) const override;
220   int64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
221   bool isTlsLocalDynamicRel(uint32_t Type) const override;
222   bool isTlsGlobalDynamicRel(uint32_t Type) const override;
223   bool isTlsInitialExecRel(uint32_t Type) const override;
224   void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
225   void writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const override;
226   void writePltHeader(uint8_t *Buf) const override;
227   void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
228                 int32_t Index, unsigned RelOff) const override;
229   void addPltSymbols(InputSectionData *IS, uint64_t Off) const override;
230   void addPltHeaderSymbols(InputSectionData *ISD) const override;
231   bool needsThunk(RelExpr Expr, uint32_t RelocType, const InputFile *File,
232                   const SymbolBody &S) const override;
233   void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
234 };
235 
236 template <class ELFT> class MipsTargetInfo final : public TargetInfo {
237 public:
238   MipsTargetInfo();
239   RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
240   int64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
241   bool isPicRel(uint32_t Type) const override;
242   uint32_t getDynRel(uint32_t Type) const override;
243   bool isTlsLocalDynamicRel(uint32_t Type) const override;
244   bool isTlsGlobalDynamicRel(uint32_t Type) const override;
245   void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
246   void writePltHeader(uint8_t *Buf) const override;
247   void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
248                 int32_t Index, unsigned RelOff) const override;
249   bool needsThunk(RelExpr Expr, uint32_t RelocType, const InputFile *File,
250                   const SymbolBody &S) const override;
251   void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
252   bool usesOnlyLowPageBits(uint32_t Type) const override;
253 };
254 } // anonymous namespace
255 
256 TargetInfo *createTarget() {
257   switch (Config->EMachine) {
258   case EM_386:
259   case EM_IAMCU:
260     return make<X86TargetInfo>();
261   case EM_AARCH64:
262     return make<AArch64TargetInfo>();
263   case EM_AMDGPU:
264     return make<AMDGPUTargetInfo>();
265   case EM_ARM:
266     return make<ARMTargetInfo>();
267   case EM_MIPS:
268     switch (Config->EKind) {
269     case ELF32LEKind:
270       return make<MipsTargetInfo<ELF32LE>>();
271     case ELF32BEKind:
272       return make<MipsTargetInfo<ELF32BE>>();
273     case ELF64LEKind:
274       return make<MipsTargetInfo<ELF64LE>>();
275     case ELF64BEKind:
276       return make<MipsTargetInfo<ELF64BE>>();
277     default:
278       fatal("unsupported MIPS target");
279     }
280   case EM_PPC:
281     return make<PPCTargetInfo>();
282   case EM_PPC64:
283     return make<PPC64TargetInfo>();
284   case EM_X86_64:
285     if (Config->EKind == ELF32LEKind)
286       return make<X86_64TargetInfo<ELF32LE>>();
287     return make<X86_64TargetInfo<ELF64LE>>();
288   }
289   fatal("unknown target machine");
290 }
291 
292 TargetInfo::~TargetInfo() {}
293 
294 int64_t TargetInfo::getImplicitAddend(const uint8_t *Buf, uint32_t Type) const {
295   return 0;
296 }
297 
298 bool TargetInfo::usesOnlyLowPageBits(uint32_t Type) const { return false; }
299 
300 bool TargetInfo::needsThunk(RelExpr Expr, uint32_t RelocType,
301                             const InputFile *File, const SymbolBody &S) const {
302   return false;
303 }
304 
305 bool TargetInfo::isTlsInitialExecRel(uint32_t Type) const { return false; }
306 
307 bool TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const { return false; }
308 
309 bool TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const { return false; }
310 
311 void TargetInfo::writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const {
312   writeGotPlt(Buf, S);
313 }
314 
315 RelExpr TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
316                                     RelExpr Expr) const {
317   return Expr;
318 }
319 
320 void TargetInfo::relaxGot(uint8_t *Loc, uint64_t Val) const {
321   llvm_unreachable("Should not have claimed to be relaxable");
322 }
323 
324 void TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
325                                 uint64_t Val) const {
326   llvm_unreachable("Should not have claimed to be relaxable");
327 }
328 
329 void TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
330                                 uint64_t Val) const {
331   llvm_unreachable("Should not have claimed to be relaxable");
332 }
333 
334 void TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
335                                 uint64_t Val) const {
336   llvm_unreachable("Should not have claimed to be relaxable");
337 }
338 
339 void TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
340                                 uint64_t Val) const {
341   llvm_unreachable("Should not have claimed to be relaxable");
342 }
343 
344 X86TargetInfo::X86TargetInfo() {
345   CopyRel = R_386_COPY;
346   GotRel = R_386_GLOB_DAT;
347   PltRel = R_386_JUMP_SLOT;
348   IRelativeRel = R_386_IRELATIVE;
349   RelativeRel = R_386_RELATIVE;
350   TlsGotRel = R_386_TLS_TPOFF;
351   TlsModuleIndexRel = R_386_TLS_DTPMOD32;
352   TlsOffsetRel = R_386_TLS_DTPOFF32;
353   GotEntrySize = 4;
354   GotPltEntrySize = 4;
355   PltEntrySize = 16;
356   PltHeaderSize = 16;
357   TlsGdRelaxSkip = 2;
358 }
359 
360 RelExpr X86TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
361   switch (Type) {
362   case R_386_8:
363   case R_386_16:
364   case R_386_32:
365   case R_386_TLS_LDO_32:
366     return R_ABS;
367   case R_386_TLS_GD:
368     return R_TLSGD;
369   case R_386_TLS_LDM:
370     return R_TLSLD;
371   case R_386_PLT32:
372     return R_PLT_PC;
373   case R_386_PC8:
374   case R_386_PC16:
375   case R_386_PC32:
376     return R_PC;
377   case R_386_GOTPC:
378     return R_GOTONLY_PC_FROM_END;
379   case R_386_TLS_IE:
380     return R_GOT;
381   case R_386_GOT32:
382   case R_386_GOT32X:
383   case R_386_TLS_GOTIE:
384     return R_GOT_FROM_END;
385   case R_386_GOTOFF:
386     return R_GOTREL_FROM_END;
387   case R_386_TLS_LE:
388     return R_TLS;
389   case R_386_TLS_LE_32:
390     return R_NEG_TLS;
391   case R_386_NONE:
392     return R_HINT;
393   default:
394     error(toString(S.File) + ": unknown relocation type: " + toString(Type));
395     return R_HINT;
396   }
397 }
398 
399 RelExpr X86TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
400                                        RelExpr Expr) const {
401   switch (Expr) {
402   default:
403     return Expr;
404   case R_RELAX_TLS_GD_TO_IE:
405     return R_RELAX_TLS_GD_TO_IE_END;
406   case R_RELAX_TLS_GD_TO_LE:
407     return R_RELAX_TLS_GD_TO_LE_NEG;
408   }
409 }
410 
411 void X86TargetInfo::writeGotPltHeader(uint8_t *Buf) const {
412   write32le(Buf, In<ELF32LE>::Dynamic->getVA());
413 }
414 
415 void X86TargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &S) const {
  // Entries in .got.plt initially point back to the corresponding
  // PLT entries with a fixed offset to skip the first instruction.
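  // Skipping 6 bytes lands on the "pushl $reloc_offset" that follows the
  // initial indirect jmp, so the first call goes through the lazy resolver.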
418   write32le(Buf, S.getPltVA<ELF32LE>() + 6);
419 }
420 
421 void X86TargetInfo::writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const {
422   // An x86 entry is the address of the ifunc resolver function.
423   write32le(Buf, S.getVA<ELF32LE>());
424 }
425 
426 uint32_t X86TargetInfo::getDynRel(uint32_t Type) const {
427   if (Type == R_386_TLS_LE)
428     return R_386_TLS_TPOFF;
429   if (Type == R_386_TLS_LE_32)
430     return R_386_TLS_TPOFF32;
431   return Type;
432 }
433 
434 bool X86TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
435   return Type == R_386_TLS_GD;
436 }
437 
438 bool X86TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const {
439   return Type == R_386_TLS_LDO_32 || Type == R_386_TLS_LDM;
440 }
441 
442 bool X86TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
443   return Type == R_386_TLS_IE || Type == R_386_TLS_GOTIE;
444 }
445 
446 void X86TargetInfo::writePltHeader(uint8_t *Buf) const {
447   // Executable files and shared object files have
448   // separate procedure linkage tables.
449   if (Config->Pic) {
450     const uint8_t V[] = {
451         0xff, 0xb3, 0x04, 0x00, 0x00, 0x00, // pushl 4(%ebx)
452         0xff, 0xa3, 0x08, 0x00, 0x00, 0x00, // jmp   *8(%ebx)
453         0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
454     };
455     memcpy(Buf, V, sizeof(V));
456     return;
457   }
458 
459   const uint8_t PltData[] = {
460       0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushl (GOT+4)
461       0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp   *(GOT+8)
462       0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
463   };
464   memcpy(Buf, PltData, sizeof(PltData));
465   uint32_t Got = In<ELF32LE>::GotPlt->getVA();
466   write32le(Buf + 2, Got + 4);
467   write32le(Buf + 8, Got + 8);
468 }
469 
470 void X86TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
471                              uint64_t PltEntryAddr, int32_t Index,
472                              unsigned RelOff) const {
473   const uint8_t Inst[] = {
474       0xff, 0x00, 0x00, 0x00, 0x00, 0x00, // jmp *foo_in_GOT|*foo@GOT(%ebx)
475       0x68, 0x00, 0x00, 0x00, 0x00,       // pushl $reloc_offset
476       0xe9, 0x00, 0x00, 0x00, 0x00        // jmp .PLT0@PC
477   };
478   memcpy(Buf, Inst, sizeof(Inst));
479 
480   // jmp *foo@GOT(%ebx) or jmp *foo_in_GOT
481   Buf[1] = Config->Pic ? 0xa3 : 0x25;
482   uint32_t Got = In<ELF32LE>::GotPlt->getVA();
483   write32le(Buf + 2, Config->Shared ? GotEntryAddr - Got : GotEntryAddr);
484   write32le(Buf + 7, RelOff);
485   write32le(Buf + 12, -Index * PltEntrySize - PltHeaderSize - 16);
486 }
487 
488 int64_t X86TargetInfo::getImplicitAddend(const uint8_t *Buf,
489                                          uint32_t Type) const {
490   switch (Type) {
491   default:
492     return 0;
493   case R_386_8:
494     return *Buf;
495   case R_386_PC8:
496     return SignExtend64<8>(*Buf);
497   case R_386_16:
498     return read16le(Buf);
499   case R_386_PC16:
500     return SignExtend64<16>(read16le(Buf));
501   case R_386_32:
502   case R_386_GOT32:
503   case R_386_GOT32X:
504   case R_386_GOTOFF:
505   case R_386_GOTPC:
506   case R_386_PC32:
507   case R_386_PLT32:
508   case R_386_TLS_LE:
509     return read32le(Buf);
510   }
511 }
512 
513 void X86TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
514                                 uint64_t Val) const {
  // R_386_{PC,}{8,16} are not part of the i386 psABI, but they are
  // used by some 16-bit programs such as boot loaders, so
  // we want to support them.
518   switch (Type) {
519   case R_386_8:
520   case R_386_PC8:
521     checkInt<8>(Loc, Val, Type);
522     *Loc = Val;
523     break;
524   case R_386_16:
525   case R_386_PC16:
526     checkInt<16>(Loc, Val, Type);
527     write16le(Loc, Val);
528     break;
529   default:
530     checkInt<32>(Loc, Val, Type);
531     write32le(Loc, Val);
532   }
533 }
534 
535 void X86TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
536                                    uint64_t Val) const {
537   // Convert
538   //   leal x@tlsgd(, %ebx, 1),
539   //   call __tls_get_addr@plt
540   // to
541   //   movl %gs:0,%eax
542   //   subl $x@ntpoff,%eax
543   const uint8_t Inst[] = {
544       0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x81, 0xe8, 0x00, 0x00, 0x00, 0x00  // subl $x@ntpoff, %eax
546   };
547   memcpy(Loc - 3, Inst, sizeof(Inst));
548   relocateOne(Loc + 5, R_386_32, Val);
549 }
550 
551 void X86TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
552                                    uint64_t Val) const {
553   // Convert
554   //   leal x@tlsgd(, %ebx, 1),
555   //   call __tls_get_addr@plt
556   // to
557   //   movl %gs:0, %eax
558   //   addl x@gotntpoff(%ebx), %eax
559   const uint8_t Inst[] = {
560       0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
561       0x03, 0x83, 0x00, 0x00, 0x00, 0x00  // addl 0(%ebx), %eax
562   };
563   memcpy(Loc - 3, Inst, sizeof(Inst));
564   relocateOne(Loc + 5, R_386_32, Val);
565 }
566 
// In some conditions, relocations can be optimized to avoid using the GOT.
// This function does that for the Initial Exec to Local Exec case.
569 void X86TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
570                                    uint64_t Val) const {
571   // Ulrich's document section 6.2 says that @gotntpoff can
572   // be used with MOVL or ADDL instructions.
573   // @indntpoff is similar to @gotntpoff, but for use in
574   // position dependent code.
575   uint8_t Reg = (Loc[-1] >> 3) & 7;
576 
577   if (Type == R_386_TLS_IE) {
578     if (Loc[-1] == 0xa1) {
579       // "movl foo@indntpoff,%eax" -> "movl $foo,%eax"
580       // This case is different from the generic case below because
581       // this is a 5 byte instruction while below is 6 bytes.
582       Loc[-1] = 0xb8;
583     } else if (Loc[-2] == 0x8b) {
584       // "movl foo@indntpoff,%reg" -> "movl $foo,%reg"
585       Loc[-2] = 0xc7;
586       Loc[-1] = 0xc0 | Reg;
587     } else {
588       // "addl foo@indntpoff,%reg" -> "addl $foo,%reg"
589       Loc[-2] = 0x81;
590       Loc[-1] = 0xc0 | Reg;
591     }
592   } else {
593     assert(Type == R_386_TLS_GOTIE);
594     if (Loc[-2] == 0x8b) {
595       // "movl foo@gottpoff(%rip),%reg" -> "movl $foo,%reg"
596       Loc[-2] = 0xc7;
597       Loc[-1] = 0xc0 | Reg;
598     } else {
599       // "addl foo@gotntpoff(%rip),%reg" -> "leal foo(%reg),%reg"
600       Loc[-2] = 0x8d;
601       Loc[-1] = 0x80 | (Reg << 3) | Reg;
602     }
603   }
604   relocateOne(Loc, R_386_TLS_LE, Val);
605 }
606 
607 void X86TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
608                                    uint64_t Val) const {
609   if (Type == R_386_TLS_LDO_32) {
610     relocateOne(Loc, R_386_TLS_LE, Val);
611     return;
612   }
613 
614   // Convert
615   //   leal foo(%reg),%eax
616   //   call ___tls_get_addr
617   // to
618   //   movl %gs:0,%eax
619   //   nop
620   //   leal 0(%esi,1),%esi
621   const uint8_t Inst[] = {
622       0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0,%eax
623       0x90,                               // nop
624       0x8d, 0x74, 0x26, 0x00              // leal 0(%esi,1),%esi
625   };
626   memcpy(Loc - 2, Inst, sizeof(Inst));
627 }
628 
629 template <class ELFT> X86_64TargetInfo<ELFT>::X86_64TargetInfo() {
630   CopyRel = R_X86_64_COPY;
631   GotRel = R_X86_64_GLOB_DAT;
632   PltRel = R_X86_64_JUMP_SLOT;
633   RelativeRel = R_X86_64_RELATIVE;
634   IRelativeRel = R_X86_64_IRELATIVE;
635   TlsGotRel = R_X86_64_TPOFF64;
636   TlsModuleIndexRel = R_X86_64_DTPMOD64;
637   TlsOffsetRel = R_X86_64_DTPOFF64;
638   GotEntrySize = 8;
639   GotPltEntrySize = 8;
640   PltEntrySize = 16;
641   PltHeaderSize = 16;
642   TlsGdRelaxSkip = 2;
643   // Align to the large page size (known as a superpage or huge page).
644   // FreeBSD automatically promotes large, superpage-aligned allocations.
645   DefaultImageBase = 0x200000;
646 }
647 
648 template <class ELFT>
649 RelExpr X86_64TargetInfo<ELFT>::getRelExpr(uint32_t Type,
650                                            const SymbolBody &S) const {
651   switch (Type) {
652   case R_X86_64_8:
653   case R_X86_64_32:
654   case R_X86_64_32S:
655   case R_X86_64_64:
656   case R_X86_64_DTPOFF32:
657   case R_X86_64_DTPOFF64:
658     return R_ABS;
659   case R_X86_64_TPOFF32:
660     return R_TLS;
661   case R_X86_64_TLSLD:
662     return R_TLSLD_PC;
663   case R_X86_64_TLSGD:
664     return R_TLSGD_PC;
665   case R_X86_64_SIZE32:
666   case R_X86_64_SIZE64:
667     return R_SIZE;
668   case R_X86_64_PLT32:
669     return R_PLT_PC;
670   case R_X86_64_PC32:
671   case R_X86_64_PC64:
672     return R_PC;
673   case R_X86_64_GOT32:
674   case R_X86_64_GOT64:
675     return R_GOT_FROM_END;
676   case R_X86_64_GOTPCREL:
677   case R_X86_64_GOTPCRELX:
678   case R_X86_64_REX_GOTPCRELX:
679   case R_X86_64_GOTTPOFF:
680     return R_GOT_PC;
681   case R_X86_64_NONE:
682     return R_HINT;
683   default:
684     error(toString(S.File) + ": unknown relocation type: " + toString(Type));
685     return R_HINT;
686   }
687 }
688 
689 template <class ELFT>
690 void X86_64TargetInfo<ELFT>::writeGotPltHeader(uint8_t *Buf) const {
691   // The first entry holds the value of _DYNAMIC. It is not clear why that is
692   // required, but it is documented in the psabi and the glibc dynamic linker
693   // seems to use it (note that this is relevant for linking ld.so, not any
694   // other program).
695   write64le(Buf, In<ELFT>::Dynamic->getVA());
696 }
697 
698 template <class ELFT>
699 void X86_64TargetInfo<ELFT>::writeGotPlt(uint8_t *Buf,
700                                          const SymbolBody &S) const {
701   // See comments in X86TargetInfo::writeGotPlt.
702   write32le(Buf, S.getPltVA<ELFT>() + 6);
703 }
704 
705 template <class ELFT>
706 void X86_64TargetInfo<ELFT>::writePltHeader(uint8_t *Buf) const {
707   const uint8_t PltData[] = {
708       0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushq GOT+8(%rip)
709       0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *GOT+16(%rip)
      0x0f, 0x1f, 0x40, 0x00              // nopl 0x0(%rax)
711   };
712   memcpy(Buf, PltData, sizeof(PltData));
713   uint64_t Got = In<ELFT>::GotPlt->getVA();
714   uint64_t Plt = In<ELFT>::Plt->getVA();
715   write32le(Buf + 2, Got - Plt + 2); // GOT+8
716   write32le(Buf + 8, Got - Plt + 4); // GOT+16
717 }
718 
719 template <class ELFT>
720 void X86_64TargetInfo<ELFT>::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
721                                       uint64_t PltEntryAddr, int32_t Index,
722                                       unsigned RelOff) const {
723   const uint8_t Inst[] = {
724       0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmpq *got(%rip)
725       0x68, 0x00, 0x00, 0x00, 0x00,       // pushq <relocation index>
726       0xe9, 0x00, 0x00, 0x00, 0x00        // jmpq plt[0]
727   };
728   memcpy(Buf, Inst, sizeof(Inst));
729 
730   write32le(Buf + 2, GotEntryAddr - PltEntryAddr - 6);
731   write32le(Buf + 7, Index);
732   write32le(Buf + 12, -Index * PltEntrySize - PltHeaderSize - 16);
733 }
734 
735 template <class ELFT>
736 bool X86_64TargetInfo<ELFT>::isPicRel(uint32_t Type) const {
737   return Type != R_X86_64_PC32 && Type != R_X86_64_32;
738 }
739 
740 template <class ELFT>
741 bool X86_64TargetInfo<ELFT>::isTlsInitialExecRel(uint32_t Type) const {
742   return Type == R_X86_64_GOTTPOFF;
743 }
744 
745 template <class ELFT>
746 bool X86_64TargetInfo<ELFT>::isTlsGlobalDynamicRel(uint32_t Type) const {
747   return Type == R_X86_64_TLSGD;
748 }
749 
750 template <class ELFT>
751 bool X86_64TargetInfo<ELFT>::isTlsLocalDynamicRel(uint32_t Type) const {
752   return Type == R_X86_64_DTPOFF32 || Type == R_X86_64_DTPOFF64 ||
753          Type == R_X86_64_TLSLD;
754 }
755 
756 template <class ELFT>
757 void X86_64TargetInfo<ELFT>::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
758                                             uint64_t Val) const {
759   // Convert
760   //   .byte 0x66
761   //   leaq x@tlsgd(%rip), %rdi
762   //   .word 0x6666
763   //   rex64
764   //   call __tls_get_addr@plt
765   // to
766   //   mov %fs:0x0,%rax
767   //   lea x@tpoff,%rax
768   const uint8_t Inst[] = {
769       0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
770       0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00              // lea x@tpoff,%rax
771   };
772   memcpy(Loc - 4, Inst, sizeof(Inst));
  // The original code used a PC-relative relocation, so we have to
  // compensate for the -4 it had in the addend.
775   relocateOne(Loc + 8, R_X86_64_TPOFF32, Val + 4);
776 }
777 
778 template <class ELFT>
779 void X86_64TargetInfo<ELFT>::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
780                                             uint64_t Val) const {
781   // Convert
782   //   .byte 0x66
783   //   leaq x@tlsgd(%rip), %rdi
784   //   .word 0x6666
785   //   rex64
786   //   call __tls_get_addr@plt
787   // to
788   //   mov %fs:0x0,%rax
  //   addq x@gottpoff(%rip),%rax
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
      0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00  // addq x@gottpoff(%rip),%rax
793   };
794   memcpy(Loc - 4, Inst, sizeof(Inst));
  // Both code sequences are PC-relative, but since we are moving the constant
  // forward by 8 bytes we have to subtract 8 from the value.
797   relocateOne(Loc + 8, R_X86_64_PC32, Val - 8);
798 }
799 
// In some conditions, an R_X86_64_GOTTPOFF relocation can be optimized to
// R_X86_64_TPOFF32 so that it does not use the GOT.
802 template <class ELFT>
803 void X86_64TargetInfo<ELFT>::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
804                                             uint64_t Val) const {
805   uint8_t *Inst = Loc - 3;
806   uint8_t Reg = Loc[-1] >> 3;
807   uint8_t *RegSlot = Loc - 1;
808 
  // Note that ADD with RSP or R12 is rewritten as ADD with an immediate rather
  // than as LEA, because LEA with these registers needs 4 bytes to encode and
  // thus wouldn't fit the space.
812 
813   if (memcmp(Inst, "\x48\x03\x25", 3) == 0) {
814     // "addq foo@gottpoff(%rip),%rsp" -> "addq $foo,%rsp"
815     memcpy(Inst, "\x48\x81\xc4", 3);
816   } else if (memcmp(Inst, "\x4c\x03\x25", 3) == 0) {
817     // "addq foo@gottpoff(%rip),%r12" -> "addq $foo,%r12"
818     memcpy(Inst, "\x49\x81\xc4", 3);
819   } else if (memcmp(Inst, "\x4c\x03", 2) == 0) {
820     // "addq foo@gottpoff(%rip),%r[8-15]" -> "leaq foo(%r[8-15]),%r[8-15]"
821     memcpy(Inst, "\x4d\x8d", 2);
822     *RegSlot = 0x80 | (Reg << 3) | Reg;
823   } else if (memcmp(Inst, "\x48\x03", 2) == 0) {
824     // "addq foo@gottpoff(%rip),%reg -> "leaq foo(%reg),%reg"
825     memcpy(Inst, "\x48\x8d", 2);
826     *RegSlot = 0x80 | (Reg << 3) | Reg;
827   } else if (memcmp(Inst, "\x4c\x8b", 2) == 0) {
828     // "movq foo@gottpoff(%rip),%r[8-15]" -> "movq $foo,%r[8-15]"
829     memcpy(Inst, "\x49\xc7", 2);
830     *RegSlot = 0xc0 | Reg;
831   } else if (memcmp(Inst, "\x48\x8b", 2) == 0) {
832     // "movq foo@gottpoff(%rip),%reg" -> "movq $foo,%reg"
833     memcpy(Inst, "\x48\xc7", 2);
834     *RegSlot = 0xc0 | Reg;
835   } else {
836     error(getErrorLocation(Loc - 3) +
837           "R_X86_64_GOTTPOFF must be used in MOVQ or ADDQ instructions only");
838   }
839 
840   // The original code used a PC relative relocation.
841   // Need to compensate for the -4 it had in the addend.
842   relocateOne(Loc, R_X86_64_TPOFF32, Val + 4);
843 }
844 
845 template <class ELFT>
846 void X86_64TargetInfo<ELFT>::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
847                                             uint64_t Val) const {
848   // Convert
849   //   leaq bar@tlsld(%rip), %rdi
850   //   callq __tls_get_addr@PLT
851   //   leaq bar@dtpoff(%rax), %rcx
852   // to
853   //   .word 0x6666
854   //   .byte 0x66
855   //   mov %fs:0,%rax
856   //   leaq bar@tpoff(%rax), %rcx
857   if (Type == R_X86_64_DTPOFF64) {
858     write64le(Loc, Val);
859     return;
860   }
861   if (Type == R_X86_64_DTPOFF32) {
862     relocateOne(Loc, R_X86_64_TPOFF32, Val);
863     return;
864   }
865 
866   const uint8_t Inst[] = {
867       0x66, 0x66,                                          // .word 0x6666
868       0x66,                                                // .byte 0x66
869       0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
870   };
871   memcpy(Loc - 3, Inst, sizeof(Inst));
872 }
873 
874 template <class ELFT>
875 void X86_64TargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint32_t Type,
876                                          uint64_t Val) const {
877   switch (Type) {
878   case R_X86_64_8:
879     checkUInt<8>(Loc, Val, Type);
880     *Loc = Val;
881     break;
882   case R_X86_64_32:
883     checkUInt<32>(Loc, Val, Type);
884     write32le(Loc, Val);
885     break;
886   case R_X86_64_32S:
887   case R_X86_64_TPOFF32:
888   case R_X86_64_GOT32:
889   case R_X86_64_GOTPCREL:
890   case R_X86_64_GOTPCRELX:
891   case R_X86_64_REX_GOTPCRELX:
892   case R_X86_64_PC32:
893   case R_X86_64_GOTTPOFF:
894   case R_X86_64_PLT32:
895   case R_X86_64_TLSGD:
896   case R_X86_64_TLSLD:
897   case R_X86_64_DTPOFF32:
898   case R_X86_64_SIZE32:
899     checkInt<32>(Loc, Val, Type);
900     write32le(Loc, Val);
901     break;
902   case R_X86_64_64:
903   case R_X86_64_DTPOFF64:
904   case R_X86_64_GLOB_DAT:
905   case R_X86_64_PC64:
906   case R_X86_64_SIZE64:
907   case R_X86_64_GOT64:
908     write64le(Loc, Val);
909     break;
910   default:
911     llvm_unreachable("unexpected relocation");
912   }
913 }
914 
915 template <class ELFT>
916 RelExpr X86_64TargetInfo<ELFT>::adjustRelaxExpr(uint32_t Type,
917                                                 const uint8_t *Data,
918                                                 RelExpr RelExpr) const {
919   if (Type != R_X86_64_GOTPCRELX && Type != R_X86_64_REX_GOTPCRELX)
920     return RelExpr;
921   const uint8_t Op = Data[-2];
922   const uint8_t ModRm = Data[-1];
  // FIXME: When PIC is disabled and foo is defined locally in the
  // lower 32-bit address space, the memory operand in mov can be converted
  // into an immediate operand. Otherwise, mov must be changed to lea. We
  // support only the latter relaxation at this moment.
927   if (Op == 0x8b)
928     return R_RELAX_GOT_PC;
929   // Relax call and jmp.
930   if (Op == 0xff && (ModRm == 0x15 || ModRm == 0x25))
931     return R_RELAX_GOT_PC;
932 
  // Relaxation of test, adc, add, and, cmp, or, sbb, sub, xor.
  // If PIC, then no relaxation is available.
  // We also don't relax test/binop instructions without a REX byte;
  // they are 32-bit operations and not common.
937   assert(Type == R_X86_64_REX_GOTPCRELX);
938   return Config->Pic ? RelExpr : R_RELAX_GOT_PC_NOPIC;
939 }
940 
941 // A subset of relaxations can only be applied for no-PIC. This method
942 // handles such relaxations. Instructions encoding information was taken from:
943 // "Intel 64 and IA-32 Architectures Software Developer's Manual V2"
944 // (http://www.intel.com/content/dam/www/public/us/en/documents/manuals/
945 //    64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf)
946 template <class ELFT>
947 void X86_64TargetInfo<ELFT>::relaxGotNoPic(uint8_t *Loc, uint64_t Val,
948                                            uint8_t Op, uint8_t ModRm) const {
949   const uint8_t Rex = Loc[-3];
950   // Convert "test %reg, foo@GOTPCREL(%rip)" to "test $foo, %reg".
951   if (Op == 0x85) {
    // See "TEST-Logical Compare" (4-428 Vol. 2B);
    // TEST r/m64, r64 uses the "full" ModR/M byte (no opcode extension).

    // The ModR/M byte has the form XX YYY ZZZ, where
    // YYY is MODRM.reg (register 2) and ZZZ is MODRM.rm (register 1).
    // XX has different meanings:
    // 00: The operand's memory address is in reg1.
    // 01: The operand's memory address is reg1 + a byte-sized displacement.
    // 10: The operand's memory address is reg1 + a four-byte displacement.
    // 11: The operand is reg1 itself.
    // If an instruction requires only one operand, the unused reg2 field
    // holds extra opcode bits rather than a register code.
    // 0xC0 == 11 000 000 binary.
    // 0x38 == 00 111 000 binary.
    // We transfer reg2 to reg1 here as the operand.
    // See "2.1.3 ModR/M and SIB Bytes" (Vol. 2A 2-3).
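    // For example (illustrative, not taken from a real object file): for
    // "test %rax, foo@GOTPCREL(%rip)" the ModR/M byte is 0x05 (reg2 = %rax,
    // rm = rip-relative). 0xc0 | (0x05 & 0x38) >> 3 gives 0xc0, i.e. mod = 11
    // and rm = %rax, which together with the 0xf7 opcode written below forms
    // "test $foo, %rax".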
968     Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3; // ModR/M byte.
969 
970     // Change opcode from TEST r/m64, r64 to TEST r/m64, imm32
971     // See "TEST-Logical Compare" (4-428 Vol. 2B).
972     Loc[-2] = 0xf7;
973 
974     // Move R bit to the B bit in REX byte.
975     // REX byte is encoded as 0100WRXB, where
976     // 0100 is 4bit fixed pattern.
977     // REX.W When 1, a 64-bit operand size is used. Otherwise, when 0, the
978     //   default operand size is used (which is 32-bit for most but not all
979     //   instructions).
980     // REX.R This 1-bit value is an extension to the MODRM.reg field.
981     // REX.X This 1-bit value is an extension to the SIB.index field.
982     // REX.B This 1-bit value is an extension to the MODRM.rm field or the
983     // SIB.base field.
984     // See "2.2.1.2 More on REX Prefix Fields " (2-8 Vol. 2A).
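    // For example (illustrative): REX 0x4c (0100WRXB with W = 1, R = 1)
    // becomes 0x49 (W = 1, B = 1), because the register that was selected by
    // MODRM.reg now lives in MODRM.rm.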
985     Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
986     relocateOne(Loc, R_X86_64_PC32, Val);
987     return;
988   }
989 
990   // If we are here then we need to relax the adc, add, and, cmp, or, sbb, sub
991   // or xor operations.
992 
993   // Convert "binop foo@GOTPCREL(%rip), %reg" to "binop $foo, %reg".
  // The logic is close to that of the test instruction above, but we also
  // write the opcode extension here; see below for details.
996   Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3 | (Op & 0x3c); // ModR/M byte.
997 
998   // Primary opcode is 0x81, opcode extension is one of:
999   // 000b = ADD, 001b is OR, 010b is ADC, 011b is SBB,
1000   // 100b is AND, 101b is SUB, 110b is XOR, 111b is CMP.
1001   // This value was wrote to MODRM.reg in a line above.
1002   // See "3.2 INSTRUCTIONS (A-M)" (Vol. 2A 3-15),
1003   // "INSTRUCTION SET REFERENCE, N-Z" (Vol. 2B 4-1) for
1004   // descriptions about each operation.
1005   Loc[-2] = 0x81;
1006   Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
1007   relocateOne(Loc, R_X86_64_PC32, Val);
1008 }
1009 
1010 template <class ELFT>
1011 void X86_64TargetInfo<ELFT>::relaxGot(uint8_t *Loc, uint64_t Val) const {
1012   const uint8_t Op = Loc[-2];
1013   const uint8_t ModRm = Loc[-1];
1014 
1015   // Convert "mov foo@GOTPCREL(%rip),%reg" to "lea foo(%rip),%reg".
1016   if (Op == 0x8b) {
1017     Loc[-2] = 0x8d;
1018     relocateOne(Loc, R_X86_64_PC32, Val);
1019     return;
1020   }
1021 
1022   if (Op != 0xff) {
1023     // We are relaxing a rip relative to an absolute, so compensate
1024     // for the old -4 addend.
1025     assert(!Config->Pic);
1026     relaxGotNoPic(Loc, Val + 4, Op, ModRm);
1027     return;
1028   }
1029 
1030   // Convert call/jmp instructions.
1031   if (ModRm == 0x15) {
1032     // ABI says we can convert "call *foo@GOTPCREL(%rip)" to "nop; call foo".
1033     // Instead we convert to "addr32 call foo" where addr32 is an instruction
    // prefix. That makes the result a single instruction.
1035     Loc[-2] = 0x67; // addr32 prefix
1036     Loc[-1] = 0xe8; // call
1037     relocateOne(Loc, R_X86_64_PC32, Val);
1038     return;
1039   }
1040 
1041   // Convert "jmp *foo@GOTPCREL(%rip)" to "jmp foo; nop".
  // jmp doesn't return, so it is fine to use nop here; it is just a stub.
1043   assert(ModRm == 0x25);
1044   Loc[-2] = 0xe9; // jmp
1045   Loc[3] = 0x90;  // nop
1046   relocateOne(Loc - 1, R_X86_64_PC32, Val + 1);
1047 }
1048 
1049 // Relocation masks following the #lo(value), #hi(value), #ha(value),
1050 // #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1 (Relocation Types) of the PPC-elf64abi
// document.
1053 static uint16_t applyPPCLo(uint64_t V) { return V; }
1054 static uint16_t applyPPCHi(uint64_t V) { return V >> 16; }
1055 static uint16_t applyPPCHa(uint64_t V) { return (V + 0x8000) >> 16; }
1056 static uint16_t applyPPCHigher(uint64_t V) { return V >> 32; }
1057 static uint16_t applyPPCHighera(uint64_t V) { return (V + 0x8000) >> 32; }
1058 static uint16_t applyPPCHighest(uint64_t V) { return V >> 48; }
1059 static uint16_t applyPPCHighesta(uint64_t V) { return (V + 0x8000) >> 48; }
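// For example, for V = 0x12348765: #lo(V) is 0x8765, #hi(V) is 0x1234 and
// #ha(V) is 0x1235, because the low half is negative when sign-extended, so
// the "addis" using #ha must pre-compensate for the "addi" of #lo that follows.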
1060 
1061 PPCTargetInfo::PPCTargetInfo() {}
1062 
1063 void PPCTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
1064                                 uint64_t Val) const {
1065   switch (Type) {
1066   case R_PPC_ADDR16_HA:
1067     write16be(Loc, applyPPCHa(Val));
1068     break;
1069   case R_PPC_ADDR16_LO:
1070     write16be(Loc, applyPPCLo(Val));
1071     break;
1072   case R_PPC_ADDR32:
1073   case R_PPC_REL32:
1074     write32be(Loc, Val);
1075     break;
1076   case R_PPC_REL24:
1077     or32be(Loc, Val & 0x3FFFFFC);
1078     break;
1079   default:
1080     error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
1081   }
1082 }
1083 
1084 RelExpr PPCTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
1085   switch (Type) {
1086   case R_PPC_REL24:
1087   case R_PPC_REL32:
1088     return R_PC;
1089   default:
1090     return R_ABS;
1091   }
1092 }
1093 
1094 PPC64TargetInfo::PPC64TargetInfo() {
1095   PltRel = GotRel = R_PPC64_GLOB_DAT;
1096   RelativeRel = R_PPC64_RELATIVE;
1097   GotEntrySize = 8;
1098   GotPltEntrySize = 8;
1099   PltEntrySize = 32;
1100   PltHeaderSize = 0;
1101 
1102   // We need 64K pages (at least under glibc/Linux, the loader won't
1103   // set different permissions on a finer granularity than that).
1104   DefaultMaxPageSize = 65536;
1105 
1106   // The PPC64 ELF ABI v1 spec, says:
1107   //
1108   //   It is normally desirable to put segments with different characteristics
1109   //   in separate 256 Mbyte portions of the address space, to give the
1110   //   operating system full paging flexibility in the 64-bit address space.
1111   //
1112   // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
1113   // use 0x10000000 as the starting address.
1114   DefaultImageBase = 0x10000000;
1115 }
1116 
1117 static uint64_t PPC64TocOffset = 0x8000;
1118 
1119 uint64_t getPPC64TocBase() {
1120   // The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The
1121   // TOC starts where the first of these sections starts. We always create a
1122   // .got when we see a relocation that uses it, so for us the start is always
1123   // the .got.
1124   uint64_t TocVA = In<ELF64BE>::Got->getVA();
1125 
1126   // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
1127   // thus permitting a full 64 Kbytes segment. Note that the glibc startup
1128   // code (crt1.o) assumes that you can get from the TOC base to the
1129   // start of the .toc section with only a single (signed) 16-bit relocation.
1130   return TocVA + PPC64TocOffset;
1131 }
1132 
1133 RelExpr PPC64TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
1134   switch (Type) {
1135   default:
1136     return R_ABS;
1137   case R_PPC64_TOC16:
1138   case R_PPC64_TOC16_DS:
1139   case R_PPC64_TOC16_HA:
1140   case R_PPC64_TOC16_HI:
1141   case R_PPC64_TOC16_LO:
1142   case R_PPC64_TOC16_LO_DS:
1143     return R_GOTREL;
1144   case R_PPC64_TOC:
1145     return R_PPC_TOC;
1146   case R_PPC64_REL24:
1147     return R_PPC_PLT_OPD;
1148   }
1149 }
1150 
1151 void PPC64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
1152                                uint64_t PltEntryAddr, int32_t Index,
1153                                unsigned RelOff) const {
1154   uint64_t Off = GotEntryAddr - getPPC64TocBase();
1155 
1156   // FIXME: What we should do, in theory, is get the offset of the function
1157   // descriptor in the .opd section, and use that as the offset from %r2 (the
1158   // TOC-base pointer). Instead, we have the GOT-entry offset, and that will
1159   // be a pointer to the function descriptor in the .opd section. Using
1160   // this scheme is simpler, but requires an extra indirection per PLT dispatch.
1161 
1162   write32be(Buf, 0xf8410028);                       // std %r2, 40(%r1)
1163   write32be(Buf + 4, 0x3d620000 | applyPPCHa(Off)); // addis %r11, %r2, X@ha
1164   write32be(Buf + 8, 0xe98b0000 | applyPPCLo(Off)); // ld %r12, X@l(%r11)
1165   write32be(Buf + 12, 0xe96c0000);                  // ld %r11,0(%r12)
1166   write32be(Buf + 16, 0x7d6903a6);                  // mtctr %r11
1167   write32be(Buf + 20, 0xe84c0008);                  // ld %r2,8(%r12)
1168   write32be(Buf + 24, 0xe96c0010);                  // ld %r11,16(%r12)
1169   write32be(Buf + 28, 0x4e800420);                  // bctr
1170 }
1171 
1172 static std::pair<uint32_t, uint64_t> toAddr16Rel(uint32_t Type, uint64_t Val) {
1173   uint64_t V = Val - PPC64TocOffset;
1174   switch (Type) {
1175   case R_PPC64_TOC16:
1176     return {R_PPC64_ADDR16, V};
1177   case R_PPC64_TOC16_DS:
1178     return {R_PPC64_ADDR16_DS, V};
1179   case R_PPC64_TOC16_HA:
1180     return {R_PPC64_ADDR16_HA, V};
1181   case R_PPC64_TOC16_HI:
1182     return {R_PPC64_ADDR16_HI, V};
1183   case R_PPC64_TOC16_LO:
1184     return {R_PPC64_ADDR16_LO, V};
1185   case R_PPC64_TOC16_LO_DS:
1186     return {R_PPC64_ADDR16_LO_DS, V};
1187   default:
1188     return {Type, Val};
1189   }
1190 }
1191 
1192 void PPC64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
1193                                   uint64_t Val) const {
1194   // For a TOC-relative relocation, proceed in terms of the corresponding
1195   // ADDR16 relocation type.
1196   std::tie(Type, Val) = toAddr16Rel(Type, Val);
1197 
1198   switch (Type) {
1199   case R_PPC64_ADDR14: {
1200     checkAlignment<4>(Loc, Val, Type);
1201     // Preserve the AA/LK bits in the branch instruction
1202     uint8_t AALK = Loc[3];
1203     write16be(Loc + 2, (AALK & 3) | (Val & 0xfffc));
1204     break;
1205   }
1206   case R_PPC64_ADDR16:
1207     checkInt<16>(Loc, Val, Type);
1208     write16be(Loc, Val);
1209     break;
1210   case R_PPC64_ADDR16_DS:
1211     checkInt<16>(Loc, Val, Type);
1212     write16be(Loc, (read16be(Loc) & 3) | (Val & ~3));
1213     break;
1214   case R_PPC64_ADDR16_HA:
1215   case R_PPC64_REL16_HA:
1216     write16be(Loc, applyPPCHa(Val));
1217     break;
1218   case R_PPC64_ADDR16_HI:
1219   case R_PPC64_REL16_HI:
1220     write16be(Loc, applyPPCHi(Val));
1221     break;
1222   case R_PPC64_ADDR16_HIGHER:
1223     write16be(Loc, applyPPCHigher(Val));
1224     break;
1225   case R_PPC64_ADDR16_HIGHERA:
1226     write16be(Loc, applyPPCHighera(Val));
1227     break;
1228   case R_PPC64_ADDR16_HIGHEST:
1229     write16be(Loc, applyPPCHighest(Val));
1230     break;
1231   case R_PPC64_ADDR16_HIGHESTA:
1232     write16be(Loc, applyPPCHighesta(Val));
1233     break;
1234   case R_PPC64_ADDR16_LO:
1235     write16be(Loc, applyPPCLo(Val));
1236     break;
1237   case R_PPC64_ADDR16_LO_DS:
1238   case R_PPC64_REL16_LO:
1239     write16be(Loc, (read16be(Loc) & 3) | (applyPPCLo(Val) & ~3));
1240     break;
1241   case R_PPC64_ADDR32:
1242   case R_PPC64_REL32:
1243     checkInt<32>(Loc, Val, Type);
1244     write32be(Loc, Val);
1245     break;
1246   case R_PPC64_ADDR64:
1247   case R_PPC64_REL64:
1248   case R_PPC64_TOC:
1249     write64be(Loc, Val);
1250     break;
1251   case R_PPC64_REL24: {
1252     uint32_t Mask = 0x03FFFFFC;
1253     checkInt<24>(Loc, Val, Type);
1254     write32be(Loc, (read32be(Loc) & ~Mask) | (Val & Mask));
1255     break;
1256   }
1257   default:
1258     error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
1259   }
1260 }
1261 
1262 AArch64TargetInfo::AArch64TargetInfo() {
1263   CopyRel = R_AARCH64_COPY;
1264   RelativeRel = R_AARCH64_RELATIVE;
1265   IRelativeRel = R_AARCH64_IRELATIVE;
1266   GotRel = R_AARCH64_GLOB_DAT;
1267   PltRel = R_AARCH64_JUMP_SLOT;
1268   TlsDescRel = R_AARCH64_TLSDESC;
1269   TlsGotRel = R_AARCH64_TLS_TPREL64;
1270   GotEntrySize = 8;
1271   GotPltEntrySize = 8;
1272   PltEntrySize = 16;
1273   PltHeaderSize = 32;
1274   DefaultMaxPageSize = 65536;
1275 
  // It doesn't seem to be documented anywhere, but TLS on AArch64 uses variant
  // 1 of the TLS data structures and the TCB size is 16.
1278   TcbSize = 16;
1279 }
1280 
1281 RelExpr AArch64TargetInfo::getRelExpr(uint32_t Type,
1282                                       const SymbolBody &S) const {
1283   switch (Type) {
1284   default:
1285     return R_ABS;
1286   case R_AARCH64_TLSDESC_ADR_PAGE21:
1287     return R_TLSDESC_PAGE;
1288   case R_AARCH64_TLSDESC_LD64_LO12_NC:
1289   case R_AARCH64_TLSDESC_ADD_LO12_NC:
1290     return R_TLSDESC;
1291   case R_AARCH64_TLSDESC_CALL:
1292     return R_TLSDESC_CALL;
1293   case R_AARCH64_TLSLE_ADD_TPREL_HI12:
1294   case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
1295     return R_TLS;
1296   case R_AARCH64_CALL26:
1297   case R_AARCH64_CONDBR19:
1298   case R_AARCH64_JUMP26:
1299   case R_AARCH64_TSTBR14:
1300     return R_PLT_PC;
1301   case R_AARCH64_PREL16:
1302   case R_AARCH64_PREL32:
1303   case R_AARCH64_PREL64:
1304   case R_AARCH64_ADR_PREL_LO21:
1305     return R_PC;
1306   case R_AARCH64_ADR_PREL_PG_HI21:
1307     return R_PAGE_PC;
1308   case R_AARCH64_LD64_GOT_LO12_NC:
1309   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
1310     return R_GOT;
1311   case R_AARCH64_ADR_GOT_PAGE:
1312   case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
1313     return R_GOT_PAGE_PC;
1314   }
1315 }
1316 
1317 RelExpr AArch64TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
1318                                            RelExpr Expr) const {
1319   if (Expr == R_RELAX_TLS_GD_TO_IE) {
1320     if (Type == R_AARCH64_TLSDESC_ADR_PAGE21)
1321       return R_RELAX_TLS_GD_TO_IE_PAGE_PC;
1322     return R_RELAX_TLS_GD_TO_IE_ABS;
1323   }
1324   return Expr;
1325 }
1326 
1327 bool AArch64TargetInfo::usesOnlyLowPageBits(uint32_t Type) const {
1328   switch (Type) {
1329   default:
1330     return false;
1331   case R_AARCH64_ADD_ABS_LO12_NC:
1332   case R_AARCH64_LD64_GOT_LO12_NC:
1333   case R_AARCH64_LDST128_ABS_LO12_NC:
1334   case R_AARCH64_LDST16_ABS_LO12_NC:
1335   case R_AARCH64_LDST32_ABS_LO12_NC:
1336   case R_AARCH64_LDST64_ABS_LO12_NC:
1337   case R_AARCH64_LDST8_ABS_LO12_NC:
1338   case R_AARCH64_TLSDESC_ADD_LO12_NC:
1339   case R_AARCH64_TLSDESC_LD64_LO12_NC:
1340   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
1341     return true;
1342   }
1343 }
1344 
1345 bool AArch64TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
1346   return Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
1347          Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
1348 }
1349 
1350 bool AArch64TargetInfo::isPicRel(uint32_t Type) const {
1351   return Type == R_AARCH64_ABS32 || Type == R_AARCH64_ABS64;
1352 }
1353 
1354 void AArch64TargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
1355   write64le(Buf, In<ELF64LE>::Plt->getVA());
1356 }
1357 
1358 // Page(Expr) is the page address of the expression Expr, defined
1359 // as (Expr & ~0xFFF). (This applies even if the machine page size
1360 // supported by the platform has a different value.)
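// For example, getAArch64Page(0x12345678) returns 0x12345000.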
1361 uint64_t getAArch64Page(uint64_t Expr) {
1362   return Expr & (~static_cast<uint64_t>(0xFFF));
1363 }
1364 
1365 void AArch64TargetInfo::writePltHeader(uint8_t *Buf) const {
1366   const uint8_t PltData[] = {
1367       0xf0, 0x7b, 0xbf, 0xa9, // stp	x16, x30, [sp,#-16]!
1368       0x10, 0x00, 0x00, 0x90, // adrp	x16, Page(&(.plt.got[2]))
1369       0x11, 0x02, 0x40, 0xf9, // ldr	x17, [x16, Offset(&(.plt.got[2]))]
1370       0x10, 0x02, 0x00, 0x91, // add	x16, x16, Offset(&(.plt.got[2]))
1371       0x20, 0x02, 0x1f, 0xd6, // br	x17
1372       0x1f, 0x20, 0x03, 0xd5, // nop
1373       0x1f, 0x20, 0x03, 0xd5, // nop
1374       0x1f, 0x20, 0x03, 0xd5  // nop
1375   };
1376   memcpy(Buf, PltData, sizeof(PltData));
1377 
1378   uint64_t Got = In<ELF64LE>::GotPlt->getVA();
1379   uint64_t Plt = In<ELF64LE>::Plt->getVA();
1380   relocateOne(Buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
1381               getAArch64Page(Got + 16) - getAArch64Page(Plt + 4));
1382   relocateOne(Buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, Got + 16);
1383   relocateOne(Buf + 12, R_AARCH64_ADD_ABS_LO12_NC, Got + 16);
1384 }
1385 
1386 void AArch64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
1387                                  uint64_t PltEntryAddr, int32_t Index,
1388                                  unsigned RelOff) const {
1389   const uint8_t Inst[] = {
1390       0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[n]))
1391       0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.plt.got[n]))]
1392       0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.plt.got[n]))
1393       0x20, 0x02, 0x1f, 0xd6  // br   x17
1394   };
1395   memcpy(Buf, Inst, sizeof(Inst));
1396 
1397   relocateOne(Buf, R_AARCH64_ADR_PREL_PG_HI21,
1398               getAArch64Page(GotEntryAddr) - getAArch64Page(PltEntryAddr));
1399   relocateOne(Buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, GotEntryAddr);
1400   relocateOne(Buf + 8, R_AARCH64_ADD_ABS_LO12_NC, GotEntryAddr);
1401 }
1402 
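// Write the 21-bit immediate of an ADR/ADRP instruction: the low 2 bits go to
// the immlo field (instruction bits 30:29) and the remaining 19 bits to the
// immhi field (bits 23:5).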
1403 static void write32AArch64Addr(uint8_t *L, uint64_t Imm) {
1404   uint32_t ImmLo = (Imm & 0x3) << 29;
1405   uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
1406   uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
1407   write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
1408 }
1409 
// Return bits [Start, End] of Val, shifted right by Start bits.
// For instance, getBits(0xF0, 4, 8) returns 0xF.
1412 static uint64_t getBits(uint64_t Val, int Start, int End) {
1413   uint64_t Mask = ((uint64_t)1 << (End + 1 - Start)) - 1;
1414   return (Val >> Start) & Mask;
1415 }
1416 
// Update the immediate field in an AArch64 ldr, str, or add instruction.
1418 static void or32AArch64Imm(uint8_t *L, uint64_t Imm) {
1419   or32le(L, (Imm & 0xFFF) << 10);
1420 }
1421 
1422 void AArch64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
1423                                     uint64_t Val) const {
1424   switch (Type) {
1425   case R_AARCH64_ABS16:
1426   case R_AARCH64_PREL16:
1427     checkIntUInt<16>(Loc, Val, Type);
1428     write16le(Loc, Val);
1429     break;
1430   case R_AARCH64_ABS32:
1431   case R_AARCH64_PREL32:
1432     checkIntUInt<32>(Loc, Val, Type);
1433     write32le(Loc, Val);
1434     break;
1435   case R_AARCH64_ABS64:
1436   case R_AARCH64_GLOB_DAT:
1437   case R_AARCH64_PREL64:
1438     write64le(Loc, Val);
1439     break;
1440   case R_AARCH64_ADD_ABS_LO12_NC:
1441     or32AArch64Imm(Loc, Val);
1442     break;
1443   case R_AARCH64_ADR_GOT_PAGE:
1444   case R_AARCH64_ADR_PREL_PG_HI21:
1445   case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
1446   case R_AARCH64_TLSDESC_ADR_PAGE21:
1447     checkInt<33>(Loc, Val, Type);
1448     write32AArch64Addr(Loc, Val >> 12);
1449     break;
1450   case R_AARCH64_ADR_PREL_LO21:
1451     checkInt<21>(Loc, Val, Type);
1452     write32AArch64Addr(Loc, Val);
1453     break;
1454   case R_AARCH64_CALL26:
1455   case R_AARCH64_JUMP26:
1456     checkInt<28>(Loc, Val, Type);
1457     or32le(Loc, (Val & 0x0FFFFFFC) >> 2);
1458     break;
1459   case R_AARCH64_CONDBR19:
1460     checkInt<21>(Loc, Val, Type);
1461     or32le(Loc, (Val & 0x1FFFFC) << 3);
1462     break;
1463   case R_AARCH64_LD64_GOT_LO12_NC:
1464   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
1465   case R_AARCH64_TLSDESC_LD64_LO12_NC:
1466     checkAlignment<8>(Loc, Val, Type);
1467     or32le(Loc, (Val & 0xFF8) << 7);
1468     break;
1469   case R_AARCH64_LDST8_ABS_LO12_NC:
1470     or32AArch64Imm(Loc, getBits(Val, 0, 11));
1471     break;
1472   case R_AARCH64_LDST16_ABS_LO12_NC:
1473     or32AArch64Imm(Loc, getBits(Val, 1, 11));
1474     break;
1475   case R_AARCH64_LDST32_ABS_LO12_NC:
1476     or32AArch64Imm(Loc, getBits(Val, 2, 11));
1477     break;
1478   case R_AARCH64_LDST64_ABS_LO12_NC:
1479     or32AArch64Imm(Loc, getBits(Val, 3, 11));
1480     break;
1481   case R_AARCH64_LDST128_ABS_LO12_NC:
1482     or32AArch64Imm(Loc, getBits(Val, 4, 11));
1483     break;
1484   case R_AARCH64_MOVW_UABS_G0_NC:
1485     or32le(Loc, (Val & 0xFFFF) << 5);
1486     break;
1487   case R_AARCH64_MOVW_UABS_G1_NC:
1488     or32le(Loc, (Val & 0xFFFF0000) >> 11);
1489     break;
1490   case R_AARCH64_MOVW_UABS_G2_NC:
1491     or32le(Loc, (Val & 0xFFFF00000000) >> 27);
1492     break;
1493   case R_AARCH64_MOVW_UABS_G3:
1494     or32le(Loc, (Val & 0xFFFF000000000000) >> 43);
1495     break;
1496   case R_AARCH64_TSTBR14:
1497     checkInt<16>(Loc, Val, Type);
1498     or32le(Loc, (Val & 0xFFFC) << 3);
1499     break;
1500   case R_AARCH64_TLSLE_ADD_TPREL_HI12:
1501     checkInt<24>(Loc, Val, Type);
1502     or32AArch64Imm(Loc, Val >> 12);
1503     break;
1504   case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
1505   case R_AARCH64_TLSDESC_ADD_LO12_NC:
1506     or32AArch64Imm(Loc, Val);
1507     break;
1508   default:
1509     error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
1510   }
1511 }
1512 
1513 void AArch64TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
1514                                        uint64_t Val) const {
1515   // TLSDESC Global-Dynamic relocations are in the form:
1516   //   adrp    x0, :tlsdesc:v              [R_AARCH64_TLSDESC_ADR_PAGE21]
1517   //   ldr     x1, [x0, #:tlsdesc_lo12:v]  [R_AARCH64_TLSDESC_LD64_LO12_NC]
1518   //   add     x0, x0, :tlsdesc_lo12:v     [R_AARCH64_TLSDESC_ADD_LO12_NC]
1519   //   .tlsdesccall                        [R_AARCH64_TLSDESC_CALL]
1520   //   blr     x1
1521   // And it can be optimized to:
1522   //   movz    x0, #0x0, lsl #16
1523   //   movk    x0, #0x10
1524   //   nop
1525   //   nop
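  // A worked example with an assumed TP offset of Val = 0x10010: the page
  // relocation is rewritten to "movz x0, #0x1, lsl #16"
  // (0xd2a00000 | (0x1 << 5)) and the ldr relocation to "movk x0, #0x10"
  // (0xf2800000 | (0x10 << 5)), leaving the 32-bit offset directly in x0
  // while the add and the descriptor call become nops. This is why the offset
  // must pass the checkUInt<32> below.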
1526   checkUInt<32>(Loc, Val, Type);
1527 
1528   switch (Type) {
1529   case R_AARCH64_TLSDESC_ADD_LO12_NC:
1530   case R_AARCH64_TLSDESC_CALL:
1531     write32le(Loc, 0xd503201f); // nop
1532     return;
1533   case R_AARCH64_TLSDESC_ADR_PAGE21:
1534     write32le(Loc, 0xd2a00000 | (((Val >> 16) & 0xffff) << 5)); // movz
1535     return;
1536   case R_AARCH64_TLSDESC_LD64_LO12_NC:
1537     write32le(Loc, 0xf2800000 | ((Val & 0xffff) << 5)); // movk
1538     return;
1539   default:
1540     llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
1541   }
1542 }
1543 
1544 void AArch64TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
1545                                        uint64_t Val) const {
1546   // TLSDESC Global-Dynamic relocations are in the form:
1547   //   adrp    x0, :tlsdesc:v              [R_AARCH64_TLSDESC_ADR_PAGE21]
1548   //   ldr     x1, [x0, #:tlsdesc_lo12:v]  [R_AARCH64_TLSDESC_LD64_LO12_NC]
1549   //   add     x0, x0, :tlsdesc_lo12:v     [R_AARCH64_TLSDESC_ADD_LO12_NC]
1550   //   .tlsdesccall                        [R_AARCH64_TLSDESC_CALL]
1551   //   blr     x1
1552   // And it can be optimized to:
1553   //   adrp    x0, :gottprel:v
1554   //   ldr     x0, [x0, :gottprel_lo12:v]
1555   //   nop
1556   //   nop
1557 
1558   switch (Type) {
1559   case R_AARCH64_TLSDESC_ADD_LO12_NC:
1560   case R_AARCH64_TLSDESC_CALL:
1561     write32le(Loc, 0xd503201f); // nop
1562     break;
1563   case R_AARCH64_TLSDESC_ADR_PAGE21:
1564     write32le(Loc, 0x90000000); // adrp
1565     relocateOne(Loc, R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, Val);
1566     break;
1567   case R_AARCH64_TLSDESC_LD64_LO12_NC:
1568     write32le(Loc, 0xf9400000); // ldr
1569     relocateOne(Loc, R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, Val);
1570     break;
1571   default:
1572     llvm_unreachable("unsupported relocation for TLS GD to IE relaxation");
1573   }
1574 }
1575 
1576 void AArch64TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
1577                                        uint64_t Val) const {
1578   checkUInt<32>(Loc, Val, Type);
1579 
1580   if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
1581     // Generate MOVZ.
1582     uint32_t RegNo = read32le(Loc) & 0x1f;
1583     write32le(Loc, (0xd2a00000 | RegNo) | (((Val >> 16) & 0xffff) << 5));
1584     return;
1585   }
1586   if (Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) {
1587     // Generate MOVK.
1588     uint32_t RegNo = read32le(Loc) & 0x1f;
1589     write32le(Loc, (0xf2800000 | RegNo) | ((Val & 0xffff) << 5));
1590     return;
1591   }
1592   llvm_unreachable("invalid relocation for TLS IE to LE relaxation");
1593 }
1594 
1595 AMDGPUTargetInfo::AMDGPUTargetInfo() {
1596   RelativeRel = R_AMDGPU_REL64;
1597   GotRel = R_AMDGPU_ABS64;
1598   GotEntrySize = 8;
1599 }
1600 
1601 void AMDGPUTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
1602                                    uint64_t Val) const {
1603   switch (Type) {
1604   case R_AMDGPU_ABS32:
1605   case R_AMDGPU_GOTPCREL:
1606   case R_AMDGPU_GOTPCREL32_LO:
1607   case R_AMDGPU_REL32:
1608   case R_AMDGPU_REL32_LO:
1609     write32le(Loc, Val);
1610     break;
1611   case R_AMDGPU_ABS64:
1612     write64le(Loc, Val);
1613     break;
1614   case R_AMDGPU_GOTPCREL32_HI:
1615   case R_AMDGPU_REL32_HI:
1616     write32le(Loc, Val >> 32);
1617     break;
1618   default:
1619     error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
1620   }
1621 }
1622 
1623 RelExpr AMDGPUTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
1624   switch (Type) {
1625   case R_AMDGPU_ABS32:
1626   case R_AMDGPU_ABS64:
1627     return R_ABS;
1628   case R_AMDGPU_REL32:
1629   case R_AMDGPU_REL32_LO:
1630   case R_AMDGPU_REL32_HI:
1631     return R_PC;
1632   case R_AMDGPU_GOTPCREL:
1633   case R_AMDGPU_GOTPCREL32_LO:
1634   case R_AMDGPU_GOTPCREL32_HI:
1635     return R_GOT_PC;
1636   default:
1637     error(toString(S.File) + ": unknown relocation type: " + toString(Type));
1638     return R_HINT;
1639   }
1640 }
1641 
1642 ARMTargetInfo::ARMTargetInfo() {
1643   CopyRel = R_ARM_COPY;
1644   RelativeRel = R_ARM_RELATIVE;
1645   IRelativeRel = R_ARM_IRELATIVE;
1646   GotRel = R_ARM_GLOB_DAT;
1647   PltRel = R_ARM_JUMP_SLOT;
1648   TlsGotRel = R_ARM_TLS_TPOFF32;
1649   TlsModuleIndexRel = R_ARM_TLS_DTPMOD32;
1650   TlsOffsetRel = R_ARM_TLS_DTPOFF32;
1651   GotEntrySize = 4;
1652   GotPltEntrySize = 4;
1653   PltEntrySize = 16;
1654   PltHeaderSize = 20;
1655   // ARM uses Variant 1 TLS
1656   TcbSize = 8;
1657   NeedsThunks = true;
1658 }
1659 
1660 RelExpr ARMTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
1661   switch (Type) {
1662   default:
1663     return R_ABS;
1664   case R_ARM_THM_JUMP11:
1665     return R_PC;
1666   case R_ARM_CALL:
1667   case R_ARM_JUMP24:
1668   case R_ARM_PC24:
1669   case R_ARM_PLT32:
1670   case R_ARM_PREL31:
1671   case R_ARM_THM_JUMP19:
1672   case R_ARM_THM_JUMP24:
1673   case R_ARM_THM_CALL:
1674     return R_PLT_PC;
1675   case R_ARM_GOTOFF32:
1676     // (S + A) - GOT_ORG
1677     return R_GOTREL;
1678   case R_ARM_GOT_BREL:
1679     // GOT(S) + A - GOT_ORG
1680     return R_GOT_OFF;
1681   case R_ARM_GOT_PREL:
1682   case R_ARM_TLS_IE32:
1683     // GOT(S) + A - P
1684     return R_GOT_PC;
1685   case R_ARM_TARGET1:
1686     return Config->Target1Rel ? R_PC : R_ABS;
1687   case R_ARM_TARGET2:
1688     if (Config->Target2 == Target2Policy::Rel)
1689       return R_PC;
1690     if (Config->Target2 == Target2Policy::Abs)
1691       return R_ABS;
1692     return R_GOT_PC;
1693   case R_ARM_TLS_GD32:
1694     return R_TLSGD_PC;
1695   case R_ARM_TLS_LDM32:
1696     return R_TLSLD_PC;
1697   case R_ARM_BASE_PREL:
1698     // B(S) + A - P
1699     // FIXME: currently B(S) is assumed to be .got; this may not hold for all
1700     // platforms.
1701     return R_GOTONLY_PC;
1702   case R_ARM_MOVW_PREL_NC:
1703   case R_ARM_MOVT_PREL:
1704   case R_ARM_REL32:
1705   case R_ARM_THM_MOVW_PREL_NC:
1706   case R_ARM_THM_MOVT_PREL:
1707     return R_PC;
1708   case R_ARM_NONE:
1709     return R_HINT;
1710   case R_ARM_TLS_LE32:
1711     return R_TLS;
1712   }
1713 }
1714 
1715 bool ARMTargetInfo::isPicRel(uint32_t Type) const {
1716   return (Type == R_ARM_TARGET1 && !Config->Target1Rel) ||
1717          (Type == R_ARM_ABS32);
1718 }
1719 
1720 uint32_t ARMTargetInfo::getDynRel(uint32_t Type) const {
1721   if (Type == R_ARM_TARGET1 && !Config->Target1Rel)
1722     return R_ARM_ABS32;
1723   if (Type == R_ARM_ABS32)
1724     return Type;
1725   // Keep it going with a dummy value so that we can find more reloc errors.
1726   return R_ARM_ABS32;
1727 }
1728 
1729 void ARMTargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
1730   write32le(Buf, In<ELF32LE>::Plt->getVA());
1731 }
1732 
1733 void ARMTargetInfo::writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const {
1734   // An ARM entry is the address of the ifunc resolver function.
1735   write32le(Buf, S.getVA<ELF32LE>());
1736 }
1737 
1738 void ARMTargetInfo::writePltHeader(uint8_t *Buf) const {
1739   const uint8_t PltData[] = {
1740       0x04, 0xe0, 0x2d, 0xe5, //     str lr, [sp,#-4]!
1741       0x04, 0xe0, 0x9f, 0xe5, //     ldr lr, L2
1742       0x0e, 0xe0, 0x8f, 0xe0, // L1: add lr, pc, lr
1743       0x08, 0xf0, 0xbe, 0xe5, //     ldr pc, [lr, #8]
1744       0x00, 0x00, 0x00, 0x00, // L2: .word   &(.got.plt) - L1 - 8
1745   };
1746   memcpy(Buf, PltData, sizeof(PltData));
1747   uint64_t GotPlt = In<ELF32LE>::GotPlt->getVA();
1748   uint64_t L1 = In<ELF32LE>::Plt->getVA() + 8;
1749   write32le(Buf + 16, GotPlt - L1 - 8);
1750 }
1751 
1752 void ARMTargetInfo::addPltHeaderSymbols(InputSectionData *ISD) const {
1753   auto *IS = cast<InputSection<ELF32LE>>(ISD);
1754   addSyntheticLocal("$a", STT_NOTYPE, 0, 0, IS);
1755   addSyntheticLocal("$d", STT_NOTYPE, 16, 0, IS);
1756 }
1757 
1758 void ARMTargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
1759                              uint64_t PltEntryAddr, int32_t Index,
1760                              unsigned RelOff) const {
1761   // FIXME: Using a simple code sequence with simple relocations.
1762   // There is a more optimal sequence, but it requires support for group
1763   // relocations. See ELF for the ARM Architecture, Appendix A.3.
1764   const uint8_t PltData[] = {
1765       0x04, 0xc0, 0x9f, 0xe5, //     ldr ip, L2
1766       0x0f, 0xc0, 0x8c, 0xe0, // L1: add ip, ip, pc
1767       0x00, 0xf0, 0x9c, 0xe5, //     ldr pc, [ip]
1768       0x00, 0x00, 0x00, 0x00, // L2: .word   &(.got.plt[n]) - L1 - 8
1769   };
1770   memcpy(Buf, PltData, sizeof(PltData));
1771   uint64_t L1 = PltEntryAddr + 4;
1772   write32le(Buf + 12, GotEntryAddr - L1 - 8);
1773 }
1774 
1775 void ARMTargetInfo::addPltSymbols(InputSectionData *ISD, uint64_t Off) const {
1776   auto *IS = cast<InputSection<ELF32LE>>(ISD);
1777   addSyntheticLocal("$a", STT_NOTYPE, Off, 0, IS);
1778   addSyntheticLocal("$d", STT_NOTYPE, Off + 12, 0, IS);
1779 }
1780 
1781 bool ARMTargetInfo::needsThunk(RelExpr Expr, uint32_t RelocType,
1782                                const InputFile *File,
1783                                const SymbolBody &S) const {
1784   // If S is an undefined weak symbol in an executable, we don't need a thunk.
1785   // In a DSO, calls to undefined symbols, including weak ones, get PLT entries
1786   // which may need a thunk.
1787   if (S.isUndefined() && !S.isLocal() && S.symbol()->isWeak() &&
1788       !Config->Shared)
1789     return false;
1790   // A state change from ARM to Thumb and vice versa must go through an
1791   // interworking thunk if the relocation type is not R_ARM_CALL or
1792   // R_ARM_THM_CALL.
1793   switch (RelocType) {
1794   case R_ARM_PC24:
1795   case R_ARM_PLT32:
1796   case R_ARM_JUMP24:
1797     // Source is ARM, and all PLT entries are ARM, so no interworking is required.
1798     // Otherwise we need to interwork if the symbol has bit 0 set (Thumb).
1799     if (Expr == R_PC && ((S.getVA<ELF32LE>() & 1) == 1))
1800       return true;
1801     break;
1802   case R_ARM_THM_JUMP19:
1803   case R_ARM_THM_JUMP24:
1804     // Source is Thumb, and all PLT entries are ARM, so interworking is required.
1805     // Otherwise we need to interwork if the symbol has bit 0 clear (ARM).
1806     if (Expr == R_PLT_PC || ((S.getVA<ELF32LE>() & 1) == 0))
1807       return true;
1808     break;
1809   }
1810   return false;
1811 }
1812 
1813 void ARMTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
1814                                 uint64_t Val) const {
1815   switch (Type) {
1816   case R_ARM_ABS32:
1817   case R_ARM_BASE_PREL:
1818   case R_ARM_GLOB_DAT:
1819   case R_ARM_GOTOFF32:
1820   case R_ARM_GOT_BREL:
1821   case R_ARM_GOT_PREL:
1822   case R_ARM_REL32:
1823   case R_ARM_RELATIVE:
1824   case R_ARM_TARGET1:
1825   case R_ARM_TARGET2:
1826   case R_ARM_TLS_GD32:
1827   case R_ARM_TLS_IE32:
1828   case R_ARM_TLS_LDM32:
1829   case R_ARM_TLS_LDO32:
1830   case R_ARM_TLS_LE32:
1831   case R_ARM_TLS_TPOFF32:
1832     write32le(Loc, Val);
1833     break;
1834   case R_ARM_TLS_DTPMOD32:
1835     write32le(Loc, 1);
1836     break;
1837   case R_ARM_PREL31:
1838     checkInt<31>(Loc, Val, Type);
1839     write32le(Loc, (read32le(Loc) & 0x80000000) | (Val & ~0x80000000));
1840     break;
1841   case R_ARM_CALL:
1842     // R_ARM_CALL is used for BL and BLX instructions; depending on the
1843     // value of bit 0 of Val, we must select a BL or BLX instruction.
1844     if (Val & 1) {
1845       // If bit 0 of Val is 1 the target is Thumb, we must select a BLX.
1846       // The BLX encoding is 0xfa:H:imm24 where Val = imm24:H:'1'
1847       checkInt<26>(Loc, Val, Type);
1848       write32le(Loc, 0xfa000000 |                    // opcode
1849                          ((Val & 2) << 23) |         // H
1850                          ((Val >> 2) & 0x00ffffff)); // imm24
1851       break;
1852     }
1853     if ((read32le(Loc) & 0xfe000000) == 0xfa000000)
1854       // BLX (always unconditional) instruction to an ARM Target, select an
1855       // unconditional BL.
1856       write32le(Loc, 0xeb000000 | (read32le(Loc) & 0x00ffffff));
1857   // fall through as BL encoding is shared with B
1858   case R_ARM_JUMP24:
1859   case R_ARM_PC24:
1860   case R_ARM_PLT32:
1861     checkInt<26>(Loc, Val, Type);
1862     write32le(Loc, (read32le(Loc) & ~0x00ffffff) | ((Val >> 2) & 0x00ffffff));
1863     break;
1864   case R_ARM_THM_JUMP11:
1865     checkInt<12>(Loc, Val, Type);
1866     write16le(Loc, (read32le(Loc) & 0xf800) | ((Val >> 1) & 0x07ff));
1867     break;
1868   case R_ARM_THM_JUMP19:
1869     // Encoding T3: Val = S:J2:J1:imm6:imm11:0
1870     checkInt<21>(Loc, Val, Type);
1871     write16le(Loc,
1872               (read16le(Loc) & 0xfbc0) |   // opcode cond
1873                   ((Val >> 10) & 0x0400) | // S
1874                   ((Val >> 12) & 0x003f)); // imm6
1875     write16le(Loc + 2,
1876               0x8000 |                    // opcode
1877                   ((Val >> 8) & 0x0800) | // J2
1878                   ((Val >> 5) & 0x2000) | // J1
1879                   ((Val >> 1) & 0x07ff)); // imm11
1880     break;
1881   case R_ARM_THM_CALL:
1882     // R_ARM_THM_CALL is used for BL and BLX instructions; depending on the
1883     // value of bit 0 of Val, we must select a BL or BLX instruction.
1884     if ((Val & 1) == 0) {
1885       // Ensure the BLX destination is 4-byte aligned, as the BLX instruction
1886       // may only be 2-byte aligned. This must be done before the overflow check.
1887       Val = alignTo(Val, 4);
1888     }
1889     // Bit 12 is 0 for BLX, 1 for BL
1890     write16le(Loc + 2, (read16le(Loc + 2) & ~0x1000) | (Val & 1) << 12);
1891   // Fall through as rest of encoding is the same as B.W
1892   case R_ARM_THM_JUMP24:
1893     // Encoding B  T4, BL T1, BLX T2: Val = S:I1:I2:imm10:imm11:0
1894     // FIXME: Use of I1 and I2 require v6T2ops
1895     checkInt<25>(Loc, Val, Type);
1896     write16le(Loc,
1897               0xf000 |                     // opcode
1898                   ((Val >> 14) & 0x0400) | // S
1899                   ((Val >> 12) & 0x03ff)); // imm10
1900     write16le(Loc + 2,
1901               (read16le(Loc + 2) & 0xd000) |                  // opcode
1902                   (((~(Val >> 10)) ^ (Val >> 11)) & 0x2000) | // J1
1903                   (((~(Val >> 11)) ^ (Val >> 13)) & 0x0800) | // J2
1904                   ((Val >> 1) & 0x07ff));                     // imm11
1905     break;
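    // For reference, the J1/J2 bits written above follow the Thumb-2 wide
    // branch rule J1 = NOT(I1) EOR S and J2 = NOT(I2) EOR S, with S = Val[24],
    // I1 = Val[23] and I2 = Val[22]. An assumed example: Val = 0x654320 has
    // S = 0, I1 = 0, I2 = 1, so the entry encodes J1 = 1 and J2 = 0.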
1906   case R_ARM_MOVW_ABS_NC:
1907   case R_ARM_MOVW_PREL_NC:
1908     write32le(Loc, (read32le(Loc) & ~0x000f0fff) | ((Val & 0xf000) << 4) |
1909                        (Val & 0x0fff));
1910     break;
1911   case R_ARM_MOVT_ABS:
1912   case R_ARM_MOVT_PREL:
1913     checkInt<32>(Loc, Val, Type);
1914     write32le(Loc, (read32le(Loc) & ~0x000f0fff) |
1915                        (((Val >> 16) & 0xf000) << 4) | ((Val >> 16) & 0xfff));
1916     break;
1917   case R_ARM_THM_MOVT_ABS:
1918   case R_ARM_THM_MOVT_PREL:
1919     // Encoding T1: A = imm4:i:imm3:imm8
1920     checkInt<32>(Loc, Val, Type);
1921     write16le(Loc,
1922               0xf2c0 |                     // opcode
1923                   ((Val >> 17) & 0x0400) | // i
1924                   ((Val >> 28) & 0x000f)); // imm4
1925     write16le(Loc + 2,
1926               (read16le(Loc + 2) & 0x8f00) | // opcode
1927                   ((Val >> 12) & 0x7000) |   // imm3
1928                   ((Val >> 16) & 0x00ff));   // imm8
1929     break;
1930   case R_ARM_THM_MOVW_ABS_NC:
1931   case R_ARM_THM_MOVW_PREL_NC:
1932     // Encoding T3: A = imm4:i:imm3:imm8
1933     write16le(Loc,
1934               0xf240 |                     // opcode
1935                   ((Val >> 1) & 0x0400) |  // i
1936                   ((Val >> 12) & 0x000f)); // imm4
1937     write16le(Loc + 2,
1938               (read16le(Loc + 2) & 0x8f00) | // opcode
1939                   ((Val << 4) & 0x7000) |    // imm3
1940                   (Val & 0x00ff));           // imm8
1941     break;
1942   default:
1943     error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
1944   }
1945 }
1946 
1947 int64_t ARMTargetInfo::getImplicitAddend(const uint8_t *Buf,
1948                                          uint32_t Type) const {
1949   switch (Type) {
1950   default:
1951     return 0;
1952   case R_ARM_ABS32:
1953   case R_ARM_BASE_PREL:
1954   case R_ARM_GOTOFF32:
1955   case R_ARM_GOT_BREL:
1956   case R_ARM_GOT_PREL:
1957   case R_ARM_REL32:
1958   case R_ARM_TARGET1:
1959   case R_ARM_TARGET2:
1960   case R_ARM_TLS_GD32:
1961   case R_ARM_TLS_LDM32:
1962   case R_ARM_TLS_LDO32:
1963   case R_ARM_TLS_IE32:
1964   case R_ARM_TLS_LE32:
1965     return SignExtend64<32>(read32le(Buf));
1966   case R_ARM_PREL31:
1967     return SignExtend64<31>(read32le(Buf));
1968   case R_ARM_CALL:
1969   case R_ARM_JUMP24:
1970   case R_ARM_PC24:
1971   case R_ARM_PLT32:
1972     return SignExtend64<26>(read32le(Buf) << 2);
1973   case R_ARM_THM_JUMP11:
1974     return SignExtend64<12>(read16le(Buf) << 1);
1975   case R_ARM_THM_JUMP19: {
1976     // Encoding T3: A = S:J2:J1:imm6:imm11:0
1977     uint16_t Hi = read16le(Buf);
1978     uint16_t Lo = read16le(Buf + 2);
1979     return SignExtend64<20>(((Hi & 0x0400) << 10) | // S
1980                             ((Lo & 0x0800) << 8) |  // J2
1981                             ((Lo & 0x2000) << 5) |  // J1
1982                             ((Hi & 0x003f) << 12) | // imm6
1983                             ((Lo & 0x07ff) << 1));  // imm11:0
1984   }
1985   case R_ARM_THM_CALL:
1986   case R_ARM_THM_JUMP24: {
1987     // Encoding B T4, BL T1, BLX T2: A = S:I1:I2:imm10:imm11:0
1988     // I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S)
1989     // FIXME: I1 and I2 require v6T2ops
1990     uint16_t Hi = read16le(Buf);
1991     uint16_t Lo = read16le(Buf + 2);
1992     return SignExtend64<24>(((Hi & 0x0400) << 14) |                    // S
1993                             (~((Lo ^ (Hi << 3)) << 10) & 0x00800000) | // I1
1994                             (~((Lo ^ (Hi << 1)) << 11) & 0x00400000) | // I2
1995                             ((Hi & 0x003ff) << 12) |                   // imm10
1996                             ((Lo & 0x007ff) << 1)); // imm11:0
1997   }
1998   // Per ELF for the ARM Architecture, section 4.6.1.1, the implicit addend for
1999   // MOVW and MOVT is in the range -32768 <= A < 32768.
2000   case R_ARM_MOVW_ABS_NC:
2001   case R_ARM_MOVT_ABS:
2002   case R_ARM_MOVW_PREL_NC:
2003   case R_ARM_MOVT_PREL: {
2004     uint64_t Val = read32le(Buf) & 0x000f0fff;
2005     return SignExtend64<16>(((Val & 0x000f0000) >> 4) | (Val & 0x00fff));
2006   }
2007   case R_ARM_THM_MOVW_ABS_NC:
2008   case R_ARM_THM_MOVT_ABS:
2009   case R_ARM_THM_MOVW_PREL_NC:
2010   case R_ARM_THM_MOVT_PREL: {
2011     // Encoding T3: A = imm4:i:imm3:imm8
2012     uint16_t Hi = read16le(Buf);
2013     uint16_t Lo = read16le(Buf + 2);
2014     return SignExtend64<16>(((Hi & 0x000f) << 12) | // imm4
2015                             ((Hi & 0x0400) << 1) |  // i
2016                             ((Lo & 0x7000) >> 4) |  // imm3
2017                             (Lo & 0x00ff));         // imm8
2018   }
2019   }
2020 }
2021 
2022 bool ARMTargetInfo::isTlsLocalDynamicRel(uint32_t Type) const {
2023   return Type == R_ARM_TLS_LDO32 || Type == R_ARM_TLS_LDM32;
2024 }
2025 
2026 bool ARMTargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
2027   return Type == R_ARM_TLS_GD32;
2028 }
2029 
2030 bool ARMTargetInfo::isTlsInitialExecRel(uint32_t Type) const {
2031   return Type == R_ARM_TLS_IE32;
2032 }
2033 
2034 template <class ELFT> MipsTargetInfo<ELFT>::MipsTargetInfo() {
2035   GotPltHeaderEntriesNum = 2;
2036   DefaultMaxPageSize = 65536;
2037   GotEntrySize = sizeof(typename ELFT::uint);
2038   GotPltEntrySize = sizeof(typename ELFT::uint);
2039   PltEntrySize = 16;
2040   PltHeaderSize = 32;
2041   CopyRel = R_MIPS_COPY;
2042   PltRel = R_MIPS_JUMP_SLOT;
2043   NeedsThunks = true;
2044   if (ELFT::Is64Bits) {
2045     RelativeRel = (R_MIPS_64 << 8) | R_MIPS_REL32;
2046     TlsGotRel = R_MIPS_TLS_TPREL64;
2047     TlsModuleIndexRel = R_MIPS_TLS_DTPMOD64;
2048     TlsOffsetRel = R_MIPS_TLS_DTPREL64;
2049   } else {
2050     RelativeRel = R_MIPS_REL32;
2051     TlsGotRel = R_MIPS_TLS_TPREL32;
2052     TlsModuleIndexRel = R_MIPS_TLS_DTPMOD32;
2053     TlsOffsetRel = R_MIPS_TLS_DTPREL32;
2054   }
2055 }
2056 
2057 template <class ELFT>
2058 RelExpr MipsTargetInfo<ELFT>::getRelExpr(uint32_t Type,
2059                                          const SymbolBody &S) const {
2060   // See the comment in calculateMipsRelChain.
2061   if (ELFT::Is64Bits || Config->MipsN32Abi)
2062     Type &= 0xff;
2063   switch (Type) {
2064   default:
2065     return R_ABS;
2066   case R_MIPS_JALR:
2067     return R_HINT;
2068   case R_MIPS_GPREL16:
2069   case R_MIPS_GPREL32:
2070     return R_MIPS_GOTREL;
2071   case R_MIPS_26:
2072     return R_PLT;
2073   case R_MIPS_HI16:
2074   case R_MIPS_LO16:
2075   case R_MIPS_GOT_OFST:
2076     // R_MIPS_HI16/R_MIPS_LO16 relocations against _gp_disp calculate the
2077     // offset between the start of a function and the 'gp' value, which by
2078     // default is equal to the start of the .got section. In that case we
2079     // consider these relocations as relative.
2080     if (&S == ElfSym<ELFT>::MipsGpDisp)
2081       return R_PC;
2082     return R_ABS;
2083   case R_MIPS_PC32:
2084   case R_MIPS_PC16:
2085   case R_MIPS_PC19_S2:
2086   case R_MIPS_PC21_S2:
2087   case R_MIPS_PC26_S2:
2088   case R_MIPS_PCHI16:
2089   case R_MIPS_PCLO16:
2090     return R_PC;
2091   case R_MIPS_GOT16:
2092     if (S.isLocal())
2093       return R_MIPS_GOT_LOCAL_PAGE;
2094   // fallthrough
2095   case R_MIPS_CALL16:
2096   case R_MIPS_GOT_DISP:
2097   case R_MIPS_TLS_GOTTPREL:
2098     return R_MIPS_GOT_OFF;
2099   case R_MIPS_CALL_HI16:
2100   case R_MIPS_CALL_LO16:
2101   case R_MIPS_GOT_HI16:
2102   case R_MIPS_GOT_LO16:
2103     return R_MIPS_GOT_OFF32;
2104   case R_MIPS_GOT_PAGE:
2105     return R_MIPS_GOT_LOCAL_PAGE;
2106   case R_MIPS_TLS_GD:
2107     return R_MIPS_TLSGD;
2108   case R_MIPS_TLS_LDM:
2109     return R_MIPS_TLSLD;
2110   }
2111 }
2112 
2113 template <class ELFT> bool MipsTargetInfo<ELFT>::isPicRel(uint32_t Type) const {
2114   return Type == R_MIPS_32 || Type == R_MIPS_64;
2115 }
2116 
2117 template <class ELFT>
2118 uint32_t MipsTargetInfo<ELFT>::getDynRel(uint32_t Type) const {
2119   return RelativeRel;
2120 }
2121 
2122 template <class ELFT>
2123 bool MipsTargetInfo<ELFT>::isTlsLocalDynamicRel(uint32_t Type) const {
2124   return Type == R_MIPS_TLS_LDM;
2125 }
2126 
2127 template <class ELFT>
2128 bool MipsTargetInfo<ELFT>::isTlsGlobalDynamicRel(uint32_t Type) const {
2129   return Type == R_MIPS_TLS_GD;
2130 }
2131 
2132 template <class ELFT>
2133 void MipsTargetInfo<ELFT>::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
2134   write32<ELFT::TargetEndianness>(Buf, In<ELFT>::Plt->getVA());
2135 }
2136 
2137 template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
2138 static int64_t getPcRelocAddend(const uint8_t *Loc) {
2139   uint32_t Instr = read32<E>(Loc);
2140   uint32_t Mask = 0xffffffff >> (32 - BSIZE);
2141   return SignExtend64<BSIZE + SHIFT>((Instr & Mask) << SHIFT);
2142 }
2143 
2144 template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
2145 static void applyMipsPcReloc(uint8_t *Loc, uint32_t Type, uint64_t V) {
2146   uint32_t Mask = 0xffffffff >> (32 - BSIZE);
2147   uint32_t Instr = read32<E>(Loc);
2148   if (SHIFT > 0)
2149     checkAlignment<(1 << SHIFT)>(Loc, V, Type);
2150   checkInt<BSIZE + SHIFT>(Loc, V, Type);
2151   write32<E>(Loc, (Instr & ~Mask) | ((V >> SHIFT) & Mask));
2152 }
2153 
2154 template <endianness E> static void writeMipsHi16(uint8_t *Loc, uint64_t V) {
2155   uint32_t Instr = read32<E>(Loc);
2156   uint16_t Res = ((V + 0x8000) >> 16) & 0xffff;
2157   write32<E>(Loc, (Instr & 0xffff0000) | Res);
2158 }
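// A worked example of the %hi rounding above (assumed value): for
// V = 0x12348765 the low half 0x8765 sign-extends to a negative number, so
// writeMipsHi16 stores ((V + 0x8000) >> 16) & 0xffff = 0x1235 rather than
// 0x1234; 0x1235 << 16 plus the sign-extended 0x8765 reconstructs V exactly.
// writeMipsHigher and writeMipsHighest below apply the same rounding to the
// upper 16-bit halves of a 64-bit value.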
2159 
2160 template <endianness E> static void writeMipsHigher(uint8_t *Loc, uint64_t V) {
2161   uint32_t Instr = read32<E>(Loc);
2162   uint16_t Res = ((V + 0x80008000) >> 32) & 0xffff;
2163   write32<E>(Loc, (Instr & 0xffff0000) | Res);
2164 }
2165 
2166 template <endianness E> static void writeMipsHighest(uint8_t *Loc, uint64_t V) {
2167   uint32_t Instr = read32<E>(Loc);
2168   uint16_t Res = ((V + 0x800080008000) >> 48) & 0xffff;
2169   write32<E>(Loc, (Instr & 0xffff0000) | Res);
2170 }
2171 
2172 template <endianness E> static void writeMipsLo16(uint8_t *Loc, uint64_t V) {
2173   uint32_t Instr = read32<E>(Loc);
2174   write32<E>(Loc, (Instr & 0xffff0000) | (V & 0xffff));
2175 }
2176 
2177 template <class ELFT> static bool isMipsR6() {
2178   const auto &FirstObj = cast<ELFFileBase<ELFT>>(*Config->FirstElf);
2179   uint32_t Arch = FirstObj.getObj().getHeader()->e_flags & EF_MIPS_ARCH;
2180   return Arch == EF_MIPS_ARCH_32R6 || Arch == EF_MIPS_ARCH_64R6;
2181 }
2182 
2183 template <class ELFT>
2184 void MipsTargetInfo<ELFT>::writePltHeader(uint8_t *Buf) const {
2185   const endianness E = ELFT::TargetEndianness;
2186   if (Config->MipsN32Abi) {
2187     write32<E>(Buf, 0x3c0e0000);      // lui   $14, %hi(&GOTPLT[0])
2188     write32<E>(Buf + 4, 0x8dd90000);  // lw    $25, %lo(&GOTPLT[0])($14)
2189     write32<E>(Buf + 8, 0x25ce0000);  // addiu $14, $14, %lo(&GOTPLT[0])
2190     write32<E>(Buf + 12, 0x030ec023); // subu  $24, $24, $14
2191   } else {
2192     write32<E>(Buf, 0x3c1c0000);      // lui   $28, %hi(&GOTPLT[0])
2193     write32<E>(Buf + 4, 0x8f990000);  // lw    $25, %lo(&GOTPLT[0])($28)
2194     write32<E>(Buf + 8, 0x279c0000);  // addiu $28, $28, %lo(&GOTPLT[0])
2195     write32<E>(Buf + 12, 0x031cc023); // subu  $24, $24, $28
2196   }
2197   write32<E>(Buf + 16, 0x03e07825); // move  $15, $31
2198   write32<E>(Buf + 20, 0x0018c082); // srl   $24, $24, 2
2199   write32<E>(Buf + 24, 0x0320f809); // jalr  $25
2200   write32<E>(Buf + 28, 0x2718fffe); // subu  $24, $24, 2
2201   uint64_t Got = In<ELFT>::GotPlt->getVA();
2202   writeMipsHi16<E>(Buf, Got);
2203   writeMipsLo16<E>(Buf + 4, Got);
2204   writeMipsLo16<E>(Buf + 8, Got);
2205 }
2206 
2207 template <class ELFT>
2208 void MipsTargetInfo<ELFT>::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
2209                                     uint64_t PltEntryAddr, int32_t Index,
2210                                     unsigned RelOff) const {
2211   const endianness E = ELFT::TargetEndianness;
2212   write32<E>(Buf, 0x3c0f0000);     // lui   $15, %hi(.got.plt entry)
2213   write32<E>(Buf + 4, 0x8df90000); // l[wd] $25, %lo(.got.plt entry)($15)
2214                                    // jr    $25
2215   write32<E>(Buf + 8, isMipsR6<ELFT>() ? 0x03200009 : 0x03200008);
2216   write32<E>(Buf + 12, 0x25f80000); // addiu $24, $15, %lo(.got.plt entry)
2217   writeMipsHi16<E>(Buf, GotEntryAddr);
2218   writeMipsLo16<E>(Buf + 4, GotEntryAddr);
2219   writeMipsLo16<E>(Buf + 12, GotEntryAddr);
2220 }
2221 
2222 template <class ELFT>
2223 bool MipsTargetInfo<ELFT>::needsThunk(RelExpr Expr, uint32_t Type,
2224                                       const InputFile *File,
2225                                       const SymbolBody &S) const {
2226   // Any MIPS PIC code function is invoked with its address in register $t9.
2227   // So if we have a branch instruction from non-PIC code to PIC code,
2228   // we cannot make the jump directly and need to create a small stub
2229   // to set up the target function address.
2230   // See page 3-38 of ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
2231   if (Type != R_MIPS_26)
2232     return false;
2233   auto *F = dyn_cast_or_null<ELFFileBase<ELFT>>(File);
2234   if (!F)
2235     return false;
2236   // If current file has PIC code, LA25 stub is not required.
2237   if (F->getObj().getHeader()->e_flags & EF_MIPS_PIC)
2238     return false;
2239   auto *D = dyn_cast<DefinedRegular<ELFT>>(&S);
2240   // LA25 is required if target file has PIC code
2241   // or target symbol is a PIC symbol.
2242   return D && D->isMipsPIC();
2243 }
2244 
2245 template <class ELFT>
2246 int64_t MipsTargetInfo<ELFT>::getImplicitAddend(const uint8_t *Buf,
2247                                                 uint32_t Type) const {
2248   const endianness E = ELFT::TargetEndianness;
2249   switch (Type) {
2250   default:
2251     return 0;
2252   case R_MIPS_32:
2253   case R_MIPS_GPREL32:
2254   case R_MIPS_TLS_DTPREL32:
2255   case R_MIPS_TLS_TPREL32:
2256     return read32<E>(Buf);
2257   case R_MIPS_26:
2258     // FIXME (simon): If the relocation target symbol is not a PLT entry
2259     // we should use another expression for calculation:
2260     // ((A << 2) | (P & 0xf0000000)) >> 2
2261     return SignExtend64<28>((read32<E>(Buf) & 0x3ffffff) << 2);
2262   case R_MIPS_GPREL16:
2263   case R_MIPS_LO16:
2264   case R_MIPS_PCLO16:
2265   case R_MIPS_TLS_DTPREL_HI16:
2266   case R_MIPS_TLS_DTPREL_LO16:
2267   case R_MIPS_TLS_TPREL_HI16:
2268   case R_MIPS_TLS_TPREL_LO16:
2269     return SignExtend64<16>(read32<E>(Buf));
2270   case R_MIPS_PC16:
2271     return getPcRelocAddend<E, 16, 2>(Buf);
2272   case R_MIPS_PC19_S2:
2273     return getPcRelocAddend<E, 19, 2>(Buf);
2274   case R_MIPS_PC21_S2:
2275     return getPcRelocAddend<E, 21, 2>(Buf);
2276   case R_MIPS_PC26_S2:
2277     return getPcRelocAddend<E, 26, 2>(Buf);
2278   case R_MIPS_PC32:
2279     return getPcRelocAddend<E, 32, 0>(Buf);
2280   }
2281 }
2282 
2283 static std::pair<uint32_t, uint64_t>
2284 calculateMipsRelChain(uint8_t *Loc, uint32_t Type, uint64_t Val) {
2285   // The MIPS N64 ABI packs multiple relocations into a single relocation
2286   // record. In general, up to three relocations can have arbitrary
2287   // types. In practice, Clang and GCC use only a few combinations. For now,
2288   // we support two of them. That is enough to pass at least all LLVM
2289   // test suite cases.
2290   // <any relocation> / R_MIPS_SUB / R_MIPS_HI16 | R_MIPS_LO16
2291   // <any relocation> / R_MIPS_64 / R_MIPS_NONE
2292   // The first relocation is a 'real' relocation which is calculated
2293   // using the corresponding symbol's value. The second and the third
2294   // relocations are used to modify the result of the first one: extend it to
2295   // 64 bits, extract the high or low part, etc. For details, see part 2.9,
2296   // Relocation, at https://dmz-portal.mips.com/mw/images/8/82/007-4658-001.pdf
2297   uint32_t Type2 = (Type >> 8) & 0xff;
2298   uint32_t Type3 = (Type >> 16) & 0xff;
2299   if (Type2 == R_MIPS_NONE && Type3 == R_MIPS_NONE)
2300     return std::make_pair(Type, Val);
2301   if (Type2 == R_MIPS_64 && Type3 == R_MIPS_NONE)
2302     return std::make_pair(Type2, Val);
2303   if (Type2 == R_MIPS_SUB && (Type3 == R_MIPS_HI16 || Type3 == R_MIPS_LO16))
2304     return std::make_pair(Type3, -Val);
2305   error(getErrorLocation(Loc) + "unsupported relocations combination " +
2306         Twine(Type));
2307   return std::make_pair(Type & 0xff, Val);
2308 }
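// As a concrete example of the packed types handled above: for 64-bit targets
// the constructor sets RelativeRel = (R_MIPS_64 << 8) | R_MIPS_REL32. The low
// byte is the "real" relocation (R_MIPS_REL32), the second byte (R_MIPS_64)
// widens its result to 64 bits, and the third byte is R_MIPS_NONE, so
// calculateMipsRelChain returns (R_MIPS_64, Val) and relocateOne performs a
// 64-bit write.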
2309 
2310 template <class ELFT>
2311 void MipsTargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint32_t Type,
2312                                        uint64_t Val) const {
2313   const endianness E = ELFT::TargetEndianness;
2314   // Thread pointer and DTP offsets from the start of the TLS data area.
2315   // https://www.linux-mips.org/wiki/NPTL
2316   if (Type == R_MIPS_TLS_DTPREL_HI16 || Type == R_MIPS_TLS_DTPREL_LO16 ||
2317       Type == R_MIPS_TLS_DTPREL32 || Type == R_MIPS_TLS_DTPREL64)
2318     Val -= 0x8000;
2319   else if (Type == R_MIPS_TLS_TPREL_HI16 || Type == R_MIPS_TLS_TPREL_LO16 ||
2320            Type == R_MIPS_TLS_TPREL32 || Type == R_MIPS_TLS_TPREL64)
2321     Val -= 0x7000;
2322   if (ELFT::Is64Bits || Config->MipsN32Abi)
2323     std::tie(Type, Val) = calculateMipsRelChain(Loc, Type, Val);
2324   switch (Type) {
2325   case R_MIPS_32:
2326   case R_MIPS_GPREL32:
2327   case R_MIPS_TLS_DTPREL32:
2328   case R_MIPS_TLS_TPREL32:
2329     write32<E>(Loc, Val);
2330     break;
2331   case R_MIPS_64:
2332   case R_MIPS_TLS_DTPREL64:
2333   case R_MIPS_TLS_TPREL64:
2334     write64<E>(Loc, Val);
2335     break;
2336   case R_MIPS_26:
2337     write32<E>(Loc, (read32<E>(Loc) & ~0x3ffffff) | ((Val >> 2) & 0x3ffffff));
2338     break;
2339   case R_MIPS_GOT_DISP:
2340   case R_MIPS_GOT_PAGE:
2341   case R_MIPS_GOT16:
2342   case R_MIPS_GPREL16:
2343   case R_MIPS_TLS_GD:
2344   case R_MIPS_TLS_LDM:
2345     checkInt<16>(Loc, Val, Type);
2346   // fallthrough
2347   case R_MIPS_CALL16:
2348   case R_MIPS_CALL_LO16:
2349   case R_MIPS_GOT_LO16:
2350   case R_MIPS_GOT_OFST:
2351   case R_MIPS_LO16:
2352   case R_MIPS_PCLO16:
2353   case R_MIPS_TLS_DTPREL_LO16:
2354   case R_MIPS_TLS_GOTTPREL:
2355   case R_MIPS_TLS_TPREL_LO16:
2356     writeMipsLo16<E>(Loc, Val);
2357     break;
2358   case R_MIPS_CALL_HI16:
2359   case R_MIPS_GOT_HI16:
2360   case R_MIPS_HI16:
2361   case R_MIPS_PCHI16:
2362   case R_MIPS_TLS_DTPREL_HI16:
2363   case R_MIPS_TLS_TPREL_HI16:
2364     writeMipsHi16<E>(Loc, Val);
2365     break;
2366   case R_MIPS_HIGHER:
2367     writeMipsHigher<E>(Loc, Val);
2368     break;
2369   case R_MIPS_HIGHEST:
2370     writeMipsHighest<E>(Loc, Val);
2371     break;
2372   case R_MIPS_JALR:
2373     // Ignore this optimization relocation for now
2374     break;
2375   case R_MIPS_PC16:
2376     applyMipsPcReloc<E, 16, 2>(Loc, Type, Val);
2377     break;
2378   case R_MIPS_PC19_S2:
2379     applyMipsPcReloc<E, 19, 2>(Loc, Type, Val);
2380     break;
2381   case R_MIPS_PC21_S2:
2382     applyMipsPcReloc<E, 21, 2>(Loc, Type, Val);
2383     break;
2384   case R_MIPS_PC26_S2:
2385     applyMipsPcReloc<E, 26, 2>(Loc, Type, Val);
2386     break;
2387   case R_MIPS_PC32:
2388     applyMipsPcReloc<E, 32, 0>(Loc, Type, Val);
2389     break;
2390   default:
2391     error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
2392   }
2393 }
2394 
2395 template <class ELFT>
2396 bool MipsTargetInfo<ELFT>::usesOnlyLowPageBits(uint32_t Type) const {
2397   return Type == R_MIPS_LO16 || Type == R_MIPS_GOT_OFST;
2398 }
2399 }
2400 }
2401