xref: /llvm-project-15.0.7/lld/ELF/Target.cpp (revision 0202dec2)
1 //===- Target.cpp ---------------------------------------------------------===//
2 //
3 //                             The LLVM Linker
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Machine-specific things, such as applying relocations, creation of
11 // GOT or PLT entries, etc., are handled in this file.
12 //
13 // Refer to the ELF spec for the single-letter variables S, A, and P used
14 // in this file.
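// For example, a PC-relative relocation such as R_X86_64_PC32 resolves to
// S + A - P: the symbol value plus the addend, minus the address of the
// place being relocated.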
15 //
16 // Some functions defined in this file have "relaxTls" as part of their names.
17 // They do peephole optimization for TLS variables by rewriting instructions.
18 // They are not part of the ABI but an optional optimization, so you can skip
19 // them if you are not interested in how TLS variables are optimized.
20 // See the following paper for the details.
21 //
22 //   Ulrich Drepper, ELF Handling For Thread-Local Storage
23 //   http://www.akkadia.org/drepper/tls.pdf
24 //
25 //===----------------------------------------------------------------------===//
26 
27 #include "Target.h"
28 #include "Error.h"
29 #include "InputFiles.h"
30 #include "OutputSections.h"
31 #include "Symbols.h"
32 
33 #include "llvm/ADT/ArrayRef.h"
34 #include "llvm/Object/ELF.h"
35 #include "llvm/Support/Endian.h"
36 #include "llvm/Support/ELF.h"
37 
38 using namespace llvm;
39 using namespace llvm::object;
40 using namespace llvm::support::endian;
41 using namespace llvm::ELF;
42 
43 namespace lld {
44 namespace elf {
45 
46 TargetInfo *Target;
47 
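// OR a 32-bit value into the instruction word at P. Targets whose relocation
// values occupy bit fields within an instruction use this; it assumes the
// targeted bits in the existing word are zero.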
48 static void or32le(uint8_t *P, int32_t V) { write32le(P, read32le(P) | V); }
49 
50 StringRef getRelName(uint32_t Type) {
51   return getELFRelocationTypeName(Config->EMachine, Type);
52 }
53 
54 template <unsigned N> static void checkInt(int64_t V, uint32_t Type) {
55   if (!isInt<N>(V))
56     error("relocation " + getRelName(Type) + " out of range");
57 }
58 
59 template <unsigned N> static void checkUInt(uint64_t V, uint32_t Type) {
60   if (!isUInt<N>(V))
61     error("relocation " + getRelName(Type) + " out of range");
62 }
63 
64 template <unsigned N> static void checkIntUInt(uint64_t V, uint32_t Type) {
65   if (!isInt<N>(V) && !isUInt<N>(V))
66     error("relocation " + getRelName(Type) + " out of range");
67 }
68 
69 template <unsigned N> static void checkAlignment(uint64_t V, uint32_t Type) {
70   if ((V & (N - 1)) != 0)
71     error("improper alignment for relocation " + getRelName(Type));
72 }
73 
74 static void errorDynRel(uint32_t Type) {
75   error("relocation " + getRelName(Type) +
76         " cannot be used against a shared object; recompile with -fPIC.");
77 }
78 
79 namespace {
80 class X86TargetInfo final : public TargetInfo {
81 public:
82   X86TargetInfo();
83   RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
84   uint64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
85   void writeGotPltHeader(uint8_t *Buf) const override;
86   uint32_t getDynRel(uint32_t Type) const override;
87   bool isTlsLocalDynamicRel(uint32_t Type) const override;
88   bool isTlsGlobalDynamicRel(uint32_t Type) const override;
89   bool isTlsInitialExecRel(uint32_t Type) const override;
90   void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
91   void writePltHeader(uint8_t *Buf) const override;
92   void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
93                 int32_t Index, unsigned RelOff) const override;
94   void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
95 
96   RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
97                           RelExpr Expr) const override;
98   void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
99   void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
100   void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
101   void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
102 };
103 
104 class X86_64TargetInfo final : public TargetInfo {
105 public:
106   X86_64TargetInfo();
107   RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
108   uint32_t getDynRel(uint32_t Type) const override;
109   bool isTlsLocalDynamicRel(uint32_t Type) const override;
110   bool isTlsGlobalDynamicRel(uint32_t Type) const override;
111   bool isTlsInitialExecRel(uint32_t Type) const override;
112   void writeGotPltHeader(uint8_t *Buf) const override;
113   void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
114   void writePltHeader(uint8_t *Buf) const override;
115   void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
116                 int32_t Index, unsigned RelOff) const override;
117   void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
118 
119   RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
120                           RelExpr Expr) const override;
121   void relaxGot(uint8_t *Loc, uint64_t Val) const override;
122   void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
123   void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
124   void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
125   void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
126 
127 private:
128   void relaxGotNoPic(uint8_t *Loc, uint64_t Val, uint8_t Op,
129                      uint8_t ModRm) const;
130 };
131 
132 class PPCTargetInfo final : public TargetInfo {
133 public:
134   PPCTargetInfo();
135   void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
136   RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
137 };
138 
139 class PPC64TargetInfo final : public TargetInfo {
140 public:
141   PPC64TargetInfo();
142   RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
143   void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
144                 int32_t Index, unsigned RelOff) const override;
145   void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
146 };
147 
148 class AArch64TargetInfo final : public TargetInfo {
149 public:
150   AArch64TargetInfo();
151   RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
152   uint32_t getDynRel(uint32_t Type) const override;
153   bool isTlsInitialExecRel(uint32_t Type) const override;
154   void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
155   void writePltHeader(uint8_t *Buf) const override;
156   void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
157                 int32_t Index, unsigned RelOff) const override;
158   bool usesOnlyLowPageBits(uint32_t Type) const override;
159   void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
160   RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
161                           RelExpr Expr) const override;
162   void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
163   void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
164   void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
165 };
166 
167 class AMDGPUTargetInfo final : public TargetInfo {
168 public:
169   AMDGPUTargetInfo() {}
170   void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
171   RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
172 };
173 
174 class ARMTargetInfo final : public TargetInfo {
175 public:
176   ARMTargetInfo();
177   RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
178   uint32_t getDynRel(uint32_t Type) const override;
179   uint64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
180   void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
181   void writePltHeader(uint8_t *Buf) const override;
182   void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
183                 int32_t Index, unsigned RelOff) const override;
184   void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
185 };
186 
187 template <class ELFT> class MipsTargetInfo final : public TargetInfo {
188 public:
189   MipsTargetInfo();
190   RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
191   uint64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
192   uint32_t getDynRel(uint32_t Type) const override;
193   void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
194   void writePltHeader(uint8_t *Buf) const override;
195   void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
196                 int32_t Index, unsigned RelOff) const override;
197   void writeThunk(uint8_t *Buf, uint64_t S) const override;
198   bool needsThunk(uint32_t Type, const InputFile &File,
199                   const SymbolBody &S) const override;
200   void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
201   bool usesOnlyLowPageBits(uint32_t Type) const override;
202 };
203 } // anonymous namespace
204 
205 TargetInfo *createTarget() {
206   switch (Config->EMachine) {
207   case EM_386:
208     return new X86TargetInfo();
209   case EM_AARCH64:
210     return new AArch64TargetInfo();
211   case EM_AMDGPU:
212     return new AMDGPUTargetInfo();
213   case EM_ARM:
214     return new ARMTargetInfo();
215   case EM_MIPS:
216     switch (Config->EKind) {
217     case ELF32LEKind:
218       return new MipsTargetInfo<ELF32LE>();
219     case ELF32BEKind:
220       return new MipsTargetInfo<ELF32BE>();
221     case ELF64LEKind:
222       return new MipsTargetInfo<ELF64LE>();
223     case ELF64BEKind:
224       return new MipsTargetInfo<ELF64BE>();
225     default:
226       fatal("unsupported MIPS target");
227     }
228   case EM_PPC:
229     return new PPCTargetInfo();
230   case EM_PPC64:
231     return new PPC64TargetInfo();
232   case EM_X86_64:
233     return new X86_64TargetInfo();
234   }
235   fatal("unknown target machine");
236 }
237 
238 TargetInfo::~TargetInfo() {}
239 
240 uint64_t TargetInfo::getImplicitAddend(const uint8_t *Buf,
241                                        uint32_t Type) const {
242   return 0;
243 }
244 
245 uint64_t TargetInfo::getVAStart() const { return Config->Pic ? 0 : VAStart; }
246 
247 bool TargetInfo::usesOnlyLowPageBits(uint32_t Type) const { return false; }
248 
249 bool TargetInfo::needsThunk(uint32_t Type, const InputFile &File,
250                             const SymbolBody &S) const {
251   return false;
252 }
253 
254 bool TargetInfo::isTlsInitialExecRel(uint32_t Type) const { return false; }
255 
256 bool TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const { return false; }
257 
258 bool TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
259   return false;
260 }
261 
262 RelExpr TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
263                                     RelExpr Expr) const {
264   return Expr;
265 }
266 
267 void TargetInfo::relaxGot(uint8_t *Loc, uint64_t Val) const {
268   llvm_unreachable("Should not have claimed to be relaxable");
269 }
270 
271 void TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
272                                 uint64_t Val) const {
273   llvm_unreachable("Should not have claimed to be relaxable");
274 }
275 
276 void TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
277                                 uint64_t Val) const {
278   llvm_unreachable("Should not have claimed to be relaxable");
279 }
280 
281 void TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
282                                 uint64_t Val) const {
283   llvm_unreachable("Should not have claimed to be relaxable");
284 }
285 
286 void TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
287                                 uint64_t Val) const {
288   llvm_unreachable("Should not have claimed to be relaxable");
289 }
290 
291 X86TargetInfo::X86TargetInfo() {
292   CopyRel = R_386_COPY;
293   GotRel = R_386_GLOB_DAT;
294   PltRel = R_386_JUMP_SLOT;
295   IRelativeRel = R_386_IRELATIVE;
296   RelativeRel = R_386_RELATIVE;
297   TlsGotRel = R_386_TLS_TPOFF;
298   TlsModuleIndexRel = R_386_TLS_DTPMOD32;
299   TlsOffsetRel = R_386_TLS_DTPOFF32;
300   PltEntrySize = 16;
301   PltHeaderSize = 16;
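  // Relaxing a TLS GD code sequence rewrites both the GD relocation and the
  // relocation on the following call to __tls_get_addr, so the relocation
  // scan advances past both (hence the value below).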
302   TlsGdRelaxSkip = 2;
303 }
304 
305 RelExpr X86TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
306   switch (Type) {
307   default:
308     return R_ABS;
309   case R_386_TLS_GD:
310     return R_TLSGD;
311   case R_386_TLS_LDM:
312     return R_TLSLD;
313   case R_386_PLT32:
314     return R_PLT_PC;
315   case R_386_PC32:
316     return R_PC;
317   case R_386_GOTPC:
318     return R_GOTONLY_PC;
319   case R_386_TLS_IE:
320     return R_GOT;
321   case R_386_GOT32:
322   case R_386_TLS_GOTIE:
323     return R_GOT_FROM_END;
324   case R_386_GOTOFF:
325     return R_GOTREL;
326   case R_386_TLS_LE:
327     return R_TLS;
328   case R_386_TLS_LE_32:
329     return R_NEG_TLS;
330   }
331 }
332 
333 RelExpr X86TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
334                                        RelExpr Expr) const {
335   switch (Expr) {
336   default:
337     return Expr;
338   case R_RELAX_TLS_GD_TO_IE:
339     return R_RELAX_TLS_GD_TO_IE_END;
340   case R_RELAX_TLS_GD_TO_LE:
341     return R_RELAX_TLS_GD_TO_LE_NEG;
342   }
343 }
344 
345 void X86TargetInfo::writeGotPltHeader(uint8_t *Buf) const {
346   write32le(Buf, Out<ELF32LE>::Dynamic->getVA());
347 }
348 
349 void X86TargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &S) const {
350   // Entries in .got.plt initially point back to the corresponding
351   // PLT entries with a fixed offset to skip the first instruction.
352   write32le(Buf, S.getPltVA<ELF32LE>() + 6);
353 }
354 
355 uint32_t X86TargetInfo::getDynRel(uint32_t Type) const {
356   if (Type == R_386_TLS_LE)
357     return R_386_TLS_TPOFF;
358   if (Type == R_386_TLS_LE_32)
359     return R_386_TLS_TPOFF32;
360   return Type;
361 }
362 
363 bool X86TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
364   return Type == R_386_TLS_GD;
365 }
366 
367 bool X86TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const {
368   return Type == R_386_TLS_LDO_32 || Type == R_386_TLS_LDM;
369 }
370 
371 bool X86TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
372   return Type == R_386_TLS_IE || Type == R_386_TLS_GOTIE;
373 }
374 
375 void X86TargetInfo::writePltHeader(uint8_t *Buf) const {
376   // Executable files and shared object files have
377   // separate procedure linkage tables.
378   if (Config->Pic) {
379     const uint8_t V[] = {
380         0xff, 0xb3, 0x04, 0x00, 0x00, 0x00, // pushl 4(%ebx)
381         0xff, 0xa3, 0x08, 0x00, 0x00, 0x00, // jmp   *8(%ebx)
382         0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
383     };
384     memcpy(Buf, V, sizeof(V));
385     return;
386   }
387 
388   const uint8_t PltData[] = {
389       0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushl (GOT+4)
390       0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp   *(GOT+8)
391       0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
392   };
393   memcpy(Buf, PltData, sizeof(PltData));
394   uint32_t Got = Out<ELF32LE>::GotPlt->getVA();
395   write32le(Buf + 2, Got + 4);
396   write32le(Buf + 8, Got + 8);
397 }
398 
399 void X86TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
400                              uint64_t PltEntryAddr, int32_t Index,
401                              unsigned RelOff) const {
402   const uint8_t Inst[] = {
403       0xff, 0x00, 0x00, 0x00, 0x00, 0x00, // jmp *foo_in_GOT|*foo@GOT(%ebx)
404       0x68, 0x00, 0x00, 0x00, 0x00,       // pushl $reloc_offset
405       0xe9, 0x00, 0x00, 0x00, 0x00        // jmp .PLT0@PC
406   };
407   memcpy(Buf, Inst, sizeof(Inst));
408 
409   // jmp *foo@GOT(%ebx) or jmp *foo_in_GOT
410   Buf[1] = Config->Pic ? 0xa3 : 0x25;
411   uint32_t Got = Out<ELF32LE>::GotPlt->getVA();
412   write32le(Buf + 2, Config->Shared ? GotEntryAddr - Got : GotEntryAddr);
413   write32le(Buf + 7, RelOff);
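  // The branch at offset 12 is PC-relative from the end of this entry
  // (offset 16) and targets PLT[0], which lies PltHeaderSize plus Index full
  // entries before it; hence the negative displacement below.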
414   write32le(Buf + 12, -Index * PltEntrySize - PltHeaderSize - 16);
415 }
416 
417 uint64_t X86TargetInfo::getImplicitAddend(const uint8_t *Buf,
418                                           uint32_t Type) const {
419   switch (Type) {
420   default:
421     return 0;
422   case R_386_32:
423   case R_386_GOT32:
424   case R_386_GOTOFF:
425   case R_386_GOTPC:
426   case R_386_PC32:
427   case R_386_PLT32:
428     return read32le(Buf);
429   }
430 }
431 
432 void X86TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
433                                 uint64_t Val) const {
434   checkInt<32>(Val, Type);
435   write32le(Loc, Val);
436 }
437 
438 void X86TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
439                                    uint64_t Val) const {
440   // Convert
441   //   leal x@tlsgd(, %ebx, 1),
442   //   call __tls_get_addr@plt
443   // to
444   //   movl %gs:0,%eax
445   //   subl $x@ntpoff,%eax
446   const uint8_t Inst[] = {
447       0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
448       0x81, 0xe8, 0x00, 0x00, 0x00, 0x00  // subl 0(%ebx), %eax
449   };
450   memcpy(Loc - 3, Inst, sizeof(Inst));
451   relocateOne(Loc + 5, R_386_32, Val);
452 }
453 
454 void X86TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
455                                    uint64_t Val) const {
456   // Convert
457   //   leal x@tlsgd(, %ebx, 1),
458   //   call __tls_get_addr@plt
459   // to
460   //   movl %gs:0, %eax
461   //   addl x@gotntpoff(%ebx), %eax
462   const uint8_t Inst[] = {
463       0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
464       0x03, 0x83, 0x00, 0x00, 0x00, 0x00  // addl 0(%ebx), %eax
465   };
466   memcpy(Loc - 3, Inst, sizeof(Inst));
467   relocateOne(Loc + 5, R_386_32, Val);
468 }
469 
470 // In some conditions, relocations can be optimized to avoid using GOT.
471 // This function does that for Initial Exec to Local Exec case.
472 void X86TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
473                                    uint64_t Val) const {
474   // Ulrich's document section 6.2 says that @gotntpoff can
475   // be used with MOVL or ADDL instructions.
476   // @indntpoff is similar to @gotntpoff, but for use in
477   // position dependent code.
478   uint8_t Reg = (Loc[-1] >> 3) & 7;
479 
480   if (Type == R_386_TLS_IE) {
481     if (Loc[-1] == 0xa1) {
482       // "movl foo@indntpoff,%eax" -> "movl $foo,%eax"
483       // This case is different from the generic case below because
484       // this is a 5-byte instruction while the one below is 6 bytes.
485       Loc[-1] = 0xb8;
486     } else if (Loc[-2] == 0x8b) {
487       // "movl foo@indntpoff,%reg" -> "movl $foo,%reg"
488       Loc[-2] = 0xc7;
489       Loc[-1] = 0xc0 | Reg;
490     } else {
491       // "addl foo@indntpoff,%reg" -> "addl $foo,%reg"
492       Loc[-2] = 0x81;
493       Loc[-1] = 0xc0 | Reg;
494     }
495   } else {
496     assert(Type == R_386_TLS_GOTIE);
497     if (Loc[-2] == 0x8b) {
498       // "movl foo@gottpoff(%rip),%reg" -> "movl $foo,%reg"
499       Loc[-2] = 0xc7;
500       Loc[-1] = 0xc0 | Reg;
501     } else {
502       // "addl foo@gotntpoff(%rip),%reg" -> "leal foo(%reg),%reg"
503       Loc[-2] = 0x8d;
504       Loc[-1] = 0x80 | (Reg << 3) | Reg;
505     }
506   }
507   relocateOne(Loc, R_386_TLS_LE, Val);
508 }
509 
510 void X86TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
511                                    uint64_t Val) const {
512   if (Type == R_386_TLS_LDO_32) {
513     relocateOne(Loc, R_386_TLS_LE, Val);
514     return;
515   }
516 
517   // Convert
518   //   leal foo(%reg),%eax
519   //   call ___tls_get_addr
520   // to
521   //   movl %gs:0,%eax
522   //   nop
523   //   leal 0(%esi,1),%esi
524   const uint8_t Inst[] = {
525       0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0,%eax
526       0x90,                               // nop
527       0x8d, 0x74, 0x26, 0x00              // leal 0(%esi,1),%esi
528   };
529   memcpy(Loc - 2, Inst, sizeof(Inst));
530 }
531 
532 X86_64TargetInfo::X86_64TargetInfo() {
533   CopyRel = R_X86_64_COPY;
534   GotRel = R_X86_64_GLOB_DAT;
535   PltRel = R_X86_64_JUMP_SLOT;
536   RelativeRel = R_X86_64_RELATIVE;
537   IRelativeRel = R_X86_64_IRELATIVE;
538   TlsGotRel = R_X86_64_TPOFF64;
539   TlsModuleIndexRel = R_X86_64_DTPMOD64;
540   TlsOffsetRel = R_X86_64_DTPOFF64;
541   PltEntrySize = 16;
542   PltHeaderSize = 16;
543   TlsGdRelaxSkip = 2;
544 }
545 
546 RelExpr X86_64TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
547   switch (Type) {
548   default:
549     return R_ABS;
550   case R_X86_64_TPOFF32:
551     return R_TLS;
552   case R_X86_64_TLSLD:
553     return R_TLSLD_PC;
554   case R_X86_64_TLSGD:
555     return R_TLSGD_PC;
556   case R_X86_64_SIZE32:
557   case R_X86_64_SIZE64:
558     return R_SIZE;
559   case R_X86_64_PLT32:
560     return R_PLT_PC;
561   case R_X86_64_PC32:
562   case R_X86_64_PC64:
563     return R_PC;
564   case R_X86_64_GOT32:
565     return R_GOT_FROM_END;
566   case R_X86_64_GOTPCREL:
567   case R_X86_64_GOTPCRELX:
568   case R_X86_64_REX_GOTPCRELX:
569   case R_X86_64_GOTTPOFF:
570     return R_GOT_PC;
571   }
572 }
573 
574 void X86_64TargetInfo::writeGotPltHeader(uint8_t *Buf) const {
575   // The first entry holds the value of _DYNAMIC. It is not clear why that is
576   // required, but it is documented in the psabi and the glibc dynamic linker
577   // seems to use it (note that this is relevant for linking ld.so, not any
578   // other program).
579   write64le(Buf, Out<ELF64LE>::Dynamic->getVA());
580 }
581 
582 void X86_64TargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &S) const {
583   // See comments in X86TargetInfo::writeGotPlt.
584   write32le(Buf, S.getPltVA<ELF64LE>() + 6);
585 }
586 
587 void X86_64TargetInfo::writePltHeader(uint8_t *Buf) const {
588   const uint8_t PltData[] = {
589       0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushq GOT+8(%rip)
590       0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *GOT+16(%rip)
591       0x0f, 0x1f, 0x40, 0x00              // nopl 0x0(rax)
592   };
593   memcpy(Buf, PltData, sizeof(PltData));
594   uint64_t Got = Out<ELF64LE>::GotPlt->getVA();
595   uint64_t Plt = Out<ELF64LE>::Plt->getVA();
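  // Both operands are RIP-relative; RIP points past each 6-byte instruction
  // (Plt + 6 and Plt + 12), so the displacements to GOT+8 and GOT+16 come out
  // to Got - Plt + 2 and Got - Plt + 4, respectively.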
596   write32le(Buf + 2, Got - Plt + 2); // GOT+8
597   write32le(Buf + 8, Got - Plt + 4); // GOT+16
598 }
599 
600 void X86_64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
601                                 uint64_t PltEntryAddr, int32_t Index,
602                                 unsigned RelOff) const {
603   const uint8_t Inst[] = {
604       0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmpq *got(%rip)
605       0x68, 0x00, 0x00, 0x00, 0x00,       // pushq <relocation index>
606       0xe9, 0x00, 0x00, 0x00, 0x00        // jmpq plt[0]
607   };
608   memcpy(Buf, Inst, sizeof(Inst));
609 
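  // The first jmpq operand is RIP-relative from the end of the 6-byte
  // instruction, hence the -6. The jmpq at offset 12 branches back to PLT[0],
  // just as in the 32-bit case above.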
610   write32le(Buf + 2, GotEntryAddr - PltEntryAddr - 6);
611   write32le(Buf + 7, Index);
612   write32le(Buf + 12, -Index * PltEntrySize - PltHeaderSize - 16);
613 }
614 
615 uint32_t X86_64TargetInfo::getDynRel(uint32_t Type) const {
616   if (Type == R_X86_64_PC32 || Type == R_X86_64_32)
617     errorDynRel(Type);
618   return Type;
619 }
620 
621 bool X86_64TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
622   return Type == R_X86_64_GOTTPOFF;
623 }
624 
625 bool X86_64TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
626   return Type == R_X86_64_TLSGD;
627 }
628 
629 bool X86_64TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const {
630   return Type == R_X86_64_DTPOFF32 || Type == R_X86_64_DTPOFF64 ||
631          Type == R_X86_64_TLSLD;
632 }
633 
634 void X86_64TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
635                                       uint64_t Val) const {
636   // Convert
637   //   .byte 0x66
638   //   leaq x@tlsgd(%rip), %rdi
639   //   .word 0x6666
640   //   rex64
641   //   call __tls_get_addr@plt
642   // to
643   //   mov %fs:0x0,%rax
644   //   lea x@tpoff,%rax
645   const uint8_t Inst[] = {
646       0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
647       0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00              // lea x@tpoff,%rax
648   };
649   memcpy(Loc - 4, Inst, sizeof(Inst));
650   // The original code used a PC-relative relocation, so we have to
651   // compensate for the -4 it had in the addend.
652   relocateOne(Loc + 8, R_X86_64_TPOFF32, Val + 4);
653 }
654 
655 void X86_64TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
656                                       uint64_t Val) const {
657   // Convert
658   //   .byte 0x66
659   //   leaq x@tlsgd(%rip), %rdi
660   //   .word 0x6666
661   //   rex64
662   //   call __tls_get_addr@plt
663   // to
664   //   mov %fs:0x0,%rax
665   //   addq x@tpoff,%rax
666   const uint8_t Inst[] = {
667       0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
668       0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00              // addq x@tpoff,%rax
669   };
670   memcpy(Loc - 4, Inst, sizeof(Inst));
671   // Both code sequences are PC-relative, but since we are moving the constant
672   // forward by 8 bytes, we have to subtract 8 from the value.
673   relocateOne(Loc + 8, R_X86_64_PC32, Val - 8);
674 }
675 
676 // In some conditions, R_X86_64_GOTTPOFF relocation can be optimized to
677 // R_X86_64_TPOFF32 so that it does not use GOT.
678 void X86_64TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
679                                       uint64_t Val) const {
680   uint8_t *Inst = Loc - 3;
681   uint8_t Reg = Loc[-1] >> 3;
682   uint8_t *RegSlot = Loc - 1;
683 
684   // Note that ADD with RSP or R12 is converted to ADD instead of LEA
685   // because LEA with these registers needs 4 bytes to encode and thus
686   // wouldn't fit the space.
687 
688   if (memcmp(Inst, "\x48\x03\x25", 3) == 0) {
689     // "addq foo@gottpoff(%rip),%rsp" -> "addq $foo,%rsp"
690     memcpy(Inst, "\x48\x81\xc4", 3);
691   } else if (memcmp(Inst, "\x4c\x03\x25", 3) == 0) {
692     // "addq foo@gottpoff(%rip),%r12" -> "addq $foo,%r12"
693     memcpy(Inst, "\x49\x81\xc4", 3);
694   } else if (memcmp(Inst, "\x4c\x03", 2) == 0) {
695     // "addq foo@gottpoff(%rip),%r[8-15]" -> "leaq foo(%r[8-15]),%r[8-15]"
696     memcpy(Inst, "\x4d\x8d", 2);
697     *RegSlot = 0x80 | (Reg << 3) | Reg;
698   } else if (memcmp(Inst, "\x48\x03", 2) == 0) {
699     // "addq foo@gottpoff(%rip),%reg -> "leaq foo(%reg),%reg"
700     memcpy(Inst, "\x48\x8d", 2);
701     *RegSlot = 0x80 | (Reg << 3) | Reg;
702   } else if (memcmp(Inst, "\x4c\x8b", 2) == 0) {
703     // "movq foo@gottpoff(%rip),%r[8-15]" -> "movq $foo,%r[8-15]"
704     memcpy(Inst, "\x49\xc7", 2);
705     *RegSlot = 0xc0 | Reg;
706   } else if (memcmp(Inst, "\x48\x8b", 2) == 0) {
707     // "movq foo@gottpoff(%rip),%reg" -> "movq $foo,%reg"
708     memcpy(Inst, "\x48\xc7", 2);
709     *RegSlot = 0xc0 | Reg;
710   } else {
711     fatal("R_X86_64_GOTTPOFF must be used in MOVQ or ADDQ instructions only");
712   }
713 
714   // The original code used a PC relative relocation.
715   // Need to compensate for the -4 it had in the addend.
716   relocateOne(Loc, R_X86_64_TPOFF32, Val + 4);
717 }
718 
719 void X86_64TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
720                                       uint64_t Val) const {
721   // Convert
722   //   leaq bar@tlsld(%rip), %rdi
723   //   callq __tls_get_addr@PLT
724   //   leaq bar@dtpoff(%rax), %rcx
725   // to
726   //   .word 0x6666
727   //   .byte 0x66
728   //   mov %fs:0,%rax
729   //   leaq bar@tpoff(%rax), %rcx
730   if (Type == R_X86_64_DTPOFF64) {
731     write64le(Loc, Val);
732     return;
733   }
734   if (Type == R_X86_64_DTPOFF32) {
735     relocateOne(Loc, R_X86_64_TPOFF32, Val);
736     return;
737   }
738 
739   const uint8_t Inst[] = {
740       0x66, 0x66,                                          // .word 0x6666
741       0x66,                                                // .byte 0x66
742       0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
743   };
744   memcpy(Loc - 3, Inst, sizeof(Inst));
745 }
746 
747 void X86_64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
748                                    uint64_t Val) const {
749   switch (Type) {
750   case R_X86_64_32:
751     checkUInt<32>(Val, Type);
752     write32le(Loc, Val);
753     break;
754   case R_X86_64_32S:
755   case R_X86_64_TPOFF32:
756   case R_X86_64_GOT32:
757   case R_X86_64_GOTPCREL:
758   case R_X86_64_GOTPCRELX:
759   case R_X86_64_REX_GOTPCRELX:
760   case R_X86_64_PC32:
761   case R_X86_64_GOTTPOFF:
762   case R_X86_64_PLT32:
763   case R_X86_64_TLSGD:
764   case R_X86_64_TLSLD:
765   case R_X86_64_DTPOFF32:
766   case R_X86_64_SIZE32:
767     checkInt<32>(Val, Type);
768     write32le(Loc, Val);
769     break;
770   case R_X86_64_64:
771   case R_X86_64_DTPOFF64:
772   case R_X86_64_SIZE64:
773   case R_X86_64_PC64:
774     write64le(Loc, Val);
775     break;
776   default:
777     fatal("unrecognized reloc " + Twine(Type));
778   }
779 }
780 
781 RelExpr X86_64TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
782                                           RelExpr RelExpr) const {
783   if (Type != R_X86_64_GOTPCRELX && Type != R_X86_64_REX_GOTPCRELX)
784     return RelExpr;
785   const uint8_t Op = Data[-2];
786   const uint8_t ModRm = Data[-1];
787   // FIXME: When PIC is disabled and foo is defined locally in the
788   // lower 32-bit address space, the memory operand in mov can be converted
789   // into an immediate operand. Otherwise, mov must be changed to lea. We
790   // currently support only the latter relaxation.
791   if (Op == 0x8b)
792     return R_RELAX_GOT_PC;
793   // Relax call and jmp.
794   if (Op == 0xff && (ModRm == 0x15 || ModRm == 0x25))
795     return R_RELAX_GOT_PC;
796 
797   // Relaxation of test, adc, add, and, cmp, or, sbb, sub, and xor.
798   // If PIC is enabled, no relaxation is available.
799   // We also don't relax test/binop instructions without a REX prefix;
800   // they are 32-bit operations and not common.
801   assert(Type == R_X86_64_REX_GOTPCRELX);
802   return Config->Pic ? RelExpr : R_RELAX_GOT_PC_NOPIC;
803 }
804 
805 // A subset of relaxations can be applied only to non-PIC code. This method
806 // handles such relaxations. Instruction encoding information was taken from:
807 // "Intel 64 and IA-32 Architectures Software Developer's Manual V2"
808 // (http://www.intel.com/content/dam/www/public/us/en/documents/manuals/
809 //    64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf)
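//
// For example, this rewrites
//   48 03 05 <disp32>   addq foo@GOTPCREL(%rip), %rax
// into
//   48 81 c0 <imm32>    addq $foo, %rax
// keeping the REX.W prefix, switching to the imm32 form of the opcode (0x81),
// and moving the destination register from MODRM.reg to MODRM.rm.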
810 void X86_64TargetInfo::relaxGotNoPic(uint8_t *Loc, uint64_t Val, uint8_t Op,
811                                      uint8_t ModRm) const {
812   const uint8_t Rex = Loc[-3];
813   // Convert "test %reg, foo@GOTPCREL(%rip)" to "test $foo, %reg".
814   if (Op == 0x85) {
815     // See "TEST-Logical Compare" (4-428 Vol. 2B),
816     // TEST r/m64, r64 uses a "full" ModR/M byte (no opcode extension).
817 
818     // ModR/M byte has form XX YYY ZZZ, where
819     // YYY is MODRM.reg(register 2), ZZZ is MODRM.rm(register 1).
820     // XX has different meanings:
821     // 00: The operand's memory address is in reg1.
822     // 01: The operand's memory address is reg1 + a byte-sized displacement.
823     // 10: The operand's memory address is reg1 + a four-byte displacement.
824     // 11: The operand is reg1 itself.
825     // If an instruction requires only one operand, the unused reg2 field
826     // holds extra opcode bits rather than a register code.
827     // 0xC0 == 11 000 000 binary.
828     // 0x38 == 00 111 000 binary.
829     // We transfer reg2 to reg1 here as operand.
830     // See "2.1.3 ModR/M and SIB Bytes" (Vol. 2A 2-3).
831     Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3; // ModR/M byte.
832 
833     // Change opcode from TEST r/m64, r64 to TEST r/m64, imm32
834     // See "TEST-Logical Compare" (4-428 Vol. 2B).
835     Loc[-2] = 0xf7;
836 
837     // Move R bit to the B bit in REX byte.
838     // REX byte is encoded as 0100WRXB, where
839     // 0100 is 4bit fixed pattern.
840     // REX.W When 1, a 64-bit operand size is used. Otherwise, when 0, the
841     //   default operand size is used (which is 32-bit for most but not all
842     //   instructions).
843     // REX.R This 1-bit value is an extension to the MODRM.reg field.
844     // REX.X This 1-bit value is an extension to the SIB.index field.
845     // REX.B This 1-bit value is an extension to the MODRM.rm field or the
846     // SIB.base field.
847     // See "2.2.1.2 More on REX Prefix Fields " (2-8 Vol. 2A).
848     Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
849     relocateOne(Loc, R_X86_64_PC32, Val);
850     return;
851   }
852 
853   // If we are here then we need to relax the adc, add, and, cmp, or, sbb, sub
854   // or xor operations.
855 
856   // Convert "binop foo@GOTPCREL(%rip), %reg" to "binop $foo, %reg".
857   // The logic is close to the one for the test instruction above, but we
858   // also write the opcode extension here; see below for details.
859   Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3 | (Op & 0x3c); // ModR/M byte.
860 
861   // The primary opcode is 0x81; the opcode extension is one of:
862   // 000b is ADD, 001b is OR, 010b is ADC, 011b is SBB,
863   // 100b is AND, 101b is SUB, 110b is XOR, 111b is CMP.
864   // This value is written to MODRM.reg in the line above.
865   // See "3.2 INSTRUCTIONS (A-M)" (Vol. 2A 3-15),
866   // "INSTRUCTION SET REFERENCE, N-Z" (Vol. 2B 4-1) for
867   // descriptions about each operation.
868   Loc[-2] = 0x81;
869   Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
870   relocateOne(Loc, R_X86_64_PC32, Val);
871 }
872 
873 void X86_64TargetInfo::relaxGot(uint8_t *Loc, uint64_t Val) const {
874   const uint8_t Op = Loc[-2];
875   const uint8_t ModRm = Loc[-1];
876 
877   // Convert "mov foo@GOTPCREL(%rip),%reg" to "lea foo(%rip),%reg".
878   if (Op == 0x8b) {
879     Loc[-2] = 0x8d;
880     relocateOne(Loc, R_X86_64_PC32, Val);
881     return;
882   }
883 
884   if (Op != 0xff) {
885     // We are relaxing a RIP-relative reference to an absolute one, so we
886     // compensate for the old -4 addend.
887     assert(!Config->Pic);
888     relaxGotNoPic(Loc, Val + 4, Op, ModRm);
889     return;
890   }
891 
892   // Convert call/jmp instructions.
893   if (ModRm == 0x15) {
894     // The ABI says we can convert "call *foo@GOTPCREL(%rip)" to "nop; call foo".
895     // Instead we convert to "addr32 call foo", where addr32 is an instruction
896     // prefix. That makes the result a single instruction.
897     Loc[-2] = 0x67; // addr32 prefix
898     Loc[-1] = 0xe8; // call
899     relocateOne(Loc, R_X86_64_PC32, Val);
900     return;
901   }
902 
903   // Convert "jmp *foo@GOTPCREL(%rip)" to "jmp foo; nop".
904   // jmp doesn't return, so it is fine to use a nop here; it is just a filler.
905   assert(ModRm == 0x25);
906   Loc[-2] = 0xe9; // jmp
907   Loc[3] = 0x90;  // nop
908   relocateOne(Loc - 1, R_X86_64_PC32, Val + 1);
909 }
910 
911 // Relocation masks following the #lo(value), #hi(value), #ha(value),
912 // #higher(value), #highera(value), #highest(value), and #highesta(value)
913 // macros defined in section 4.5.1, "Relocation Types", of the 64-bit PowerPC
914 // ELF ABI document.
915 static uint16_t applyPPCLo(uint64_t V) { return V; }
916 static uint16_t applyPPCHi(uint64_t V) { return V >> 16; }
917 static uint16_t applyPPCHa(uint64_t V) { return (V + 0x8000) >> 16; }
918 static uint16_t applyPPCHigher(uint64_t V) { return V >> 32; }
919 static uint16_t applyPPCHighera(uint64_t V) { return (V + 0x8000) >> 32; }
920 static uint16_t applyPPCHighest(uint64_t V) { return V >> 48; }
921 static uint16_t applyPPCHighesta(uint64_t V) { return (V + 0x8000) >> 48; }
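// For example, with V = 0x12348000: applyPPCLo(V) is 0x8000, which the 16-bit
// instruction fields sign-extend to -0x8000, so applyPPCHa(V) rounds up to
// 0x1235 and (0x1235 << 16) - 0x8000 reconstructs V. Plain applyPPCHi(V)
// would give 0x1234 and lose the carry.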
922 
923 PPCTargetInfo::PPCTargetInfo() {}
924 
925 void PPCTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
926                                 uint64_t Val) const {
927   switch (Type) {
928   case R_PPC_ADDR16_HA:
929     write16be(Loc, applyPPCHa(Val));
930     break;
931   case R_PPC_ADDR16_LO:
932     write16be(Loc, applyPPCLo(Val));
933     break;
934   default:
935     fatal("unrecognized reloc " + Twine(Type));
936   }
937 }
938 
939 RelExpr PPCTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
940   return R_ABS;
941 }
942 
943 PPC64TargetInfo::PPC64TargetInfo() {
944   PltRel = GotRel = R_PPC64_GLOB_DAT;
945   RelativeRel = R_PPC64_RELATIVE;
946   PltEntrySize = 32;
947   PltHeaderSize = 0;
948 
949   // We need 64K pages (at least under glibc/Linux, the loader won't
950   // set different permissions on a finer granularity than that).
951   PageSize = 65536;
952 
953   // The PPC64 ELF ABI v1 spec says:
954   //
955   //   It is normally desirable to put segments with different characteristics
956   //   in separate 256 Mbyte portions of the address space, to give the
957   //   operating system full paging flexibility in the 64-bit address space.
958   //
959   // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
960   // use 0x10000000 as the starting address.
961   VAStart = 0x10000000;
962 }
963 
964 static uint64_t PPC64TocOffset = 0x8000;
965 
966 uint64_t getPPC64TocBase() {
967   // The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The
968   // TOC starts where the first of these sections starts. We always create a
969   // .got when we see a relocation that uses it, so for us the start is always
970   // the .got.
971   uint64_t TocVA = Out<ELF64BE>::Got->getVA();
972 
973   // Per the ppc64-elf-linux ABI, the TOC base is the TOC value plus 0x8000,
974   // thus permitting a full 64 KB segment. Note that the glibc startup
975   // code (crt1.o) assumes that you can get from the TOC base to the
976   // start of the .toc section with only a single (signed) 16-bit relocation.
977   return TocVA + PPC64TocOffset;
978 }
979 
980 RelExpr PPC64TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
981   switch (Type) {
982   default:
983     return R_ABS;
984   case R_PPC64_TOC16:
985   case R_PPC64_TOC16_DS:
986   case R_PPC64_TOC16_HA:
987   case R_PPC64_TOC16_HI:
988   case R_PPC64_TOC16_LO:
989   case R_PPC64_TOC16_LO_DS:
990     return R_GOTREL;
991   case R_PPC64_TOC:
992     return R_PPC_TOC;
993   case R_PPC64_REL24:
994     return R_PPC_PLT_OPD;
995   }
996 }
997 
998 void PPC64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
999                                uint64_t PltEntryAddr, int32_t Index,
1000                                unsigned RelOff) const {
1001   uint64_t Off = GotEntryAddr - getPPC64TocBase();
1002 
1003   // FIXME: What we should do, in theory, is get the offset of the function
1004   // descriptor in the .opd section, and use that as the offset from %r2 (the
1005   // TOC-base pointer). Instead, we have the GOT-entry offset, and that will
1006   // be a pointer to the function descriptor in the .opd section. Using
1007   // this scheme is simpler, but requires an extra indirection per PLT dispatch.
1008 
1009   write32be(Buf,      0xf8410028);                   // std %r2, 40(%r1)
1010   write32be(Buf + 4,  0x3d620000 | applyPPCHa(Off)); // addis %r11, %r2, X@ha
1011   write32be(Buf + 8,  0xe98b0000 | applyPPCLo(Off)); // ld %r12, X@l(%r11)
1012   write32be(Buf + 12, 0xe96c0000);                   // ld %r11,0(%r12)
1013   write32be(Buf + 16, 0x7d6903a6);                   // mtctr %r11
1014   write32be(Buf + 20, 0xe84c0008);                   // ld %r2,8(%r12)
1015   write32be(Buf + 24, 0xe96c0010);                   // ld %r11,16(%r12)
1016   write32be(Buf + 28, 0x4e800420);                   // bctr
1017 }
1018 
1019 static std::pair<uint32_t, uint64_t> toAddr16Rel(uint32_t Type, uint64_t Val) {
1020   uint64_t V = Val - PPC64TocOffset;
1021   switch (Type) {
1022   case R_PPC64_TOC16: return {R_PPC64_ADDR16, V};
1023   case R_PPC64_TOC16_DS: return {R_PPC64_ADDR16_DS, V};
1024   case R_PPC64_TOC16_HA: return {R_PPC64_ADDR16_HA, V};
1025   case R_PPC64_TOC16_HI: return {R_PPC64_ADDR16_HI, V};
1026   case R_PPC64_TOC16_LO: return {R_PPC64_ADDR16_LO, V};
1027   case R_PPC64_TOC16_LO_DS: return {R_PPC64_ADDR16_LO_DS, V};
1028   default: return {Type, Val};
1029   }
1030 }
1031 
1032 void PPC64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
1033                                   uint64_t Val) const {
1034   // For a TOC-relative relocation, proceed in terms of the corresponding
1035   // ADDR16 relocation type.
1036   std::tie(Type, Val) = toAddr16Rel(Type, Val);
1037 
1038   switch (Type) {
1039   case R_PPC64_ADDR14: {
1040     checkAlignment<4>(Val, Type);
1041     // Preserve the AA/LK bits in the branch instruction
1042     uint8_t AALK = Loc[3];
1043     write16be(Loc + 2, (AALK & 3) | (Val & 0xfffc));
1044     break;
1045   }
1046   case R_PPC64_ADDR16:
1047     checkInt<16>(Val, Type);
1048     write16be(Loc, Val);
1049     break;
1050   case R_PPC64_ADDR16_DS:
1051     checkInt<16>(Val, Type);
1052     write16be(Loc, (read16be(Loc) & 3) | (Val & ~3));
1053     break;
1054   case R_PPC64_ADDR16_HA:
1055   case R_PPC64_REL16_HA:
1056     write16be(Loc, applyPPCHa(Val));
1057     break;
1058   case R_PPC64_ADDR16_HI:
1059   case R_PPC64_REL16_HI:
1060     write16be(Loc, applyPPCHi(Val));
1061     break;
1062   case R_PPC64_ADDR16_HIGHER:
1063     write16be(Loc, applyPPCHigher(Val));
1064     break;
1065   case R_PPC64_ADDR16_HIGHERA:
1066     write16be(Loc, applyPPCHighera(Val));
1067     break;
1068   case R_PPC64_ADDR16_HIGHEST:
1069     write16be(Loc, applyPPCHighest(Val));
1070     break;
1071   case R_PPC64_ADDR16_HIGHESTA:
1072     write16be(Loc, applyPPCHighesta(Val));
1073     break;
1074   case R_PPC64_ADDR16_LO:
1075     write16be(Loc, applyPPCLo(Val));
1076     break;
1077   case R_PPC64_ADDR16_LO_DS:
1078   case R_PPC64_REL16_LO:
1079     write16be(Loc, (read16be(Loc) & 3) | (applyPPCLo(Val) & ~3));
1080     break;
1081   case R_PPC64_ADDR32:
1082   case R_PPC64_REL32:
1083     checkInt<32>(Val, Type);
1084     write32be(Loc, Val);
1085     break;
1086   case R_PPC64_ADDR64:
1087   case R_PPC64_REL64:
1088   case R_PPC64_TOC:
1089     write64be(Loc, Val);
1090     break;
1091   case R_PPC64_REL24: {
1092     uint32_t Mask = 0x03FFFFFC;
1093     checkInt<24>(Val, Type);
1094     write32be(Loc, (read32be(Loc) & ~Mask) | (Val & Mask));
1095     break;
1096   }
1097   default:
1098     fatal("unrecognized reloc " + Twine(Type));
1099   }
1100 }
1101 
1102 AArch64TargetInfo::AArch64TargetInfo() {
1103   CopyRel = R_AARCH64_COPY;
1104   RelativeRel = R_AARCH64_RELATIVE;
1105   IRelativeRel = R_AARCH64_IRELATIVE;
1106   GotRel = R_AARCH64_GLOB_DAT;
1107   PltRel = R_AARCH64_JUMP_SLOT;
1108   TlsDescRel = R_AARCH64_TLSDESC;
1109   TlsGotRel = R_AARCH64_TLS_TPREL64;
1110   PltEntrySize = 16;
1111   PltHeaderSize = 32;
1112 
1113   // It doesn't seem to be documented anywhere, but TLS on AArch64 uses variant
1114   // 1 of the TLS data structures, and the TCB size is 16.
1115   TcbSize = 16;
1116 }
1117 
1118 RelExpr AArch64TargetInfo::getRelExpr(uint32_t Type,
1119                                       const SymbolBody &S) const {
1120   switch (Type) {
1121   default:
1122     return R_ABS;
1123   case R_AARCH64_TLSDESC_ADR_PAGE21:
1124     return R_TLSDESC_PAGE;
1125   case R_AARCH64_TLSDESC_LD64_LO12_NC:
1126   case R_AARCH64_TLSDESC_ADD_LO12_NC:
1127     return R_TLSDESC;
1128   case R_AARCH64_TLSDESC_CALL:
1129     return R_HINT;
1130   case R_AARCH64_TLSLE_ADD_TPREL_HI12:
1131   case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
1132     return R_TLS;
1133   case R_AARCH64_CALL26:
1134   case R_AARCH64_CONDBR19:
1135   case R_AARCH64_JUMP26:
1136   case R_AARCH64_TSTBR14:
1137     return R_PLT_PC;
1138   case R_AARCH64_PREL16:
1139   case R_AARCH64_PREL32:
1140   case R_AARCH64_PREL64:
1141   case R_AARCH64_ADR_PREL_LO21:
1142     return R_PC;
1143   case R_AARCH64_ADR_PREL_PG_HI21:
1144     return R_PAGE_PC;
1145   case R_AARCH64_LD64_GOT_LO12_NC:
1146   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
1147     return R_GOT;
1148   case R_AARCH64_ADR_GOT_PAGE:
1149   case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
1150     return R_GOT_PAGE_PC;
1151   }
1152 }
1153 
1154 RelExpr AArch64TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
1155                                            RelExpr Expr) const {
1156   if (Expr == R_RELAX_TLS_GD_TO_IE) {
1157     if (Type == R_AARCH64_TLSDESC_ADR_PAGE21)
1158       return R_RELAX_TLS_GD_TO_IE_PAGE_PC;
1159     return R_RELAX_TLS_GD_TO_IE_ABS;
1160   }
1161   return Expr;
1162 }
1163 
1164 bool AArch64TargetInfo::usesOnlyLowPageBits(uint32_t Type) const {
1165   switch (Type) {
1166   default:
1167     return false;
1168   case R_AARCH64_ADD_ABS_LO12_NC:
1169   case R_AARCH64_LD64_GOT_LO12_NC:
1170   case R_AARCH64_LDST128_ABS_LO12_NC:
1171   case R_AARCH64_LDST16_ABS_LO12_NC:
1172   case R_AARCH64_LDST32_ABS_LO12_NC:
1173   case R_AARCH64_LDST64_ABS_LO12_NC:
1174   case R_AARCH64_LDST8_ABS_LO12_NC:
1175   case R_AARCH64_TLSDESC_ADD_LO12_NC:
1176   case R_AARCH64_TLSDESC_LD64_LO12_NC:
1177   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
1178     return true;
1179   }
1180 }
1181 
1182 bool AArch64TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
1183   return Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
1184          Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
1185 }
1186 
1187 uint32_t AArch64TargetInfo::getDynRel(uint32_t Type) const {
1188   if (Type == R_AARCH64_ABS32 || Type == R_AARCH64_ABS64)
1189     return Type;
1190   // Keep it going with a dummy value so that we can find more reloc errors.
1191   errorDynRel(Type);
1192   return R_AARCH64_ABS32;
1193 }
1194 
1195 void AArch64TargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
1196   write64le(Buf, Out<ELF64LE>::Plt->getVA());
1197 }
1198 
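// Page(expr) from the AArch64 ELF ABI: the expression with its low 12 bits
// cleared, e.g. getAArch64Page(0x12345678) == 0x12345000, matching ADRP's
// 4 KiB page granularity.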
1199 static uint64_t getAArch64Page(uint64_t Expr) {
1200   return Expr & (~static_cast<uint64_t>(0xFFF));
1201 }
1202 
1203 void AArch64TargetInfo::writePltHeader(uint8_t *Buf) const {
1204   const uint8_t PltData[] = {
1205       0xf0, 0x7b, 0xbf, 0xa9, // stp	x16, x30, [sp,#-16]!
1206       0x10, 0x00, 0x00, 0x90, // adrp	x16, Page(&(.plt.got[2]))
1207       0x11, 0x02, 0x40, 0xf9, // ldr	x17, [x16, Offset(&(.plt.got[2]))]
1208       0x10, 0x02, 0x00, 0x91, // add	x16, x16, Offset(&(.plt.got[2]))
1209       0x20, 0x02, 0x1f, 0xd6, // br	x17
1210       0x1f, 0x20, 0x03, 0xd5, // nop
1211       0x1f, 0x20, 0x03, 0xd5, // nop
1212       0x1f, 0x20, 0x03, 0xd5  // nop
1213   };
1214   memcpy(Buf, PltData, sizeof(PltData));
1215 
1216   uint64_t Got = Out<ELF64LE>::GotPlt->getVA();
1217   uint64_t Plt = Out<ELF64LE>::Plt->getVA();
1218   relocateOne(Buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
1219               getAArch64Page(Got + 16) - getAArch64Page(Plt + 4));
1220   relocateOne(Buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, Got + 16);
1221   relocateOne(Buf + 12, R_AARCH64_ADD_ABS_LO12_NC, Got + 16);
1222 }
1223 
1224 void AArch64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
1225                                  uint64_t PltEntryAddr, int32_t Index,
1226                                  unsigned RelOff) const {
1227   const uint8_t Inst[] = {
1228       0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[n]))
1229       0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.plt.got[n]))]
1230       0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.plt.got[n]))
1231       0x20, 0x02, 0x1f, 0xd6  // br   x17
1232   };
1233   memcpy(Buf, Inst, sizeof(Inst));
1234 
1235   relocateOne(Buf, R_AARCH64_ADR_PREL_PG_HI21,
1236               getAArch64Page(GotEntryAddr) - getAArch64Page(PltEntryAddr));
1237   relocateOne(Buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, GotEntryAddr);
1238   relocateOne(Buf + 8, R_AARCH64_ADD_ABS_LO12_NC, GotEntryAddr);
1239 }
1240 
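// Patch the 21-bit immediate of an ADR/ADRP instruction: the low two bits of
// Imm go into immlo (bits 30:29) and the remaining 19 bits into immhi
// (bits 23:5).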
1241 static void updateAArch64Addr(uint8_t *L, uint64_t Imm) {
1242   uint32_t ImmLo = (Imm & 0x3) << 29;
1243   uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
1244   uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
1245   write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
1246 }
1247 
1248 static inline void updateAArch64Add(uint8_t *L, uint64_t Imm) {
1249   or32le(L, (Imm & 0xFFF) << 10);
1250 }
1251 
1252 void AArch64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
1253                                     uint64_t Val) const {
1254   switch (Type) {
1255   case R_AARCH64_ABS16:
1256   case R_AARCH64_PREL16:
1257     checkIntUInt<16>(Val, Type);
1258     write16le(Loc, Val);
1259     break;
1260   case R_AARCH64_ABS32:
1261   case R_AARCH64_PREL32:
1262     checkIntUInt<32>(Val, Type);
1263     write32le(Loc, Val);
1264     break;
1265   case R_AARCH64_ABS64:
1266   case R_AARCH64_PREL64:
1267     write64le(Loc, Val);
1268     break;
1269   case R_AARCH64_ADD_ABS_LO12_NC:
1270     // This relocation stores 12 bits and there's no dedicated instruction
1271     // field for it. Instead, we OR the value into the 32-bit instruction
1272     // word at Loc. This assumes that the immediate bits already present in
1273     // Loc are zero.
1274     or32le(Loc, (Val & 0xFFF) << 10);
1275     break;
1276   case R_AARCH64_ADR_GOT_PAGE:
1277   case R_AARCH64_ADR_PREL_PG_HI21:
1278   case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
1279   case R_AARCH64_TLSDESC_ADR_PAGE21:
1280     checkInt<33>(Val, Type);
1281     updateAArch64Addr(Loc, Val >> 12);
1282     break;
1283   case R_AARCH64_ADR_PREL_LO21:
1284     checkInt<21>(Val, Type);
1285     updateAArch64Addr(Loc, Val);
1286     break;
1287   case R_AARCH64_CALL26:
1288   case R_AARCH64_JUMP26:
1289     checkInt<28>(Val, Type);
1290     or32le(Loc, (Val & 0x0FFFFFFC) >> 2);
1291     break;
1292   case R_AARCH64_CONDBR19:
1293     checkInt<21>(Val, Type);
1294     or32le(Loc, (Val & 0x1FFFFC) << 3);
1295     break;
1296   case R_AARCH64_LD64_GOT_LO12_NC:
1297   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
1298   case R_AARCH64_TLSDESC_LD64_LO12_NC:
1299     checkAlignment<8>(Val, Type);
1300     or32le(Loc, (Val & 0xFF8) << 7);
1301     break;
1302   case R_AARCH64_LDST128_ABS_LO12_NC:
1303     or32le(Loc, (Val & 0x0FF8) << 6);
1304     break;
1305   case R_AARCH64_LDST16_ABS_LO12_NC:
1306     or32le(Loc, (Val & 0x0FFC) << 9);
1307     break;
1308   case R_AARCH64_LDST8_ABS_LO12_NC:
1309     or32le(Loc, (Val & 0xFFF) << 10);
1310     break;
1311   case R_AARCH64_LDST32_ABS_LO12_NC:
1312     or32le(Loc, (Val & 0xFFC) << 8);
1313     break;
1314   case R_AARCH64_LDST64_ABS_LO12_NC:
1315     or32le(Loc, (Val & 0xFF8) << 7);
1316     break;
1317   case R_AARCH64_TSTBR14:
1318     checkInt<16>(Val, Type);
1319     or32le(Loc, (Val & 0xFFFC) << 3);
1320     break;
1321   case R_AARCH64_TLSLE_ADD_TPREL_HI12:
1322     checkInt<24>(Val, Type);
1323     updateAArch64Add(Loc, Val >> 12);
1324     break;
1325   case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
1326   case R_AARCH64_TLSDESC_ADD_LO12_NC:
1327     updateAArch64Add(Loc, Val);
1328     break;
1329   default:
1330     fatal("unrecognized reloc " + Twine(Type));
1331   }
1332 }
1333 
1334 void AArch64TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
1335                                        uint64_t Val) const {
1336   // A TLSDESC Global-Dynamic relocation sequence has the form:
1337   //   adrp    x0, :tlsdesc:v              [R_AARCH64_TLSDESC_ADR_PAGE21]
1338   //   ldr     x1, [x0, #:tlsdesc_lo12:v]  [R_AARCH64_TLSDESC_LD64_LO12_NC]
1339   //   add     x0, x0, :tlsdesc_lo12:v     [R_AARCH64_TLSDESC_ADD_LO12_NC]
1340   //   .tlsdesccall                        [R_AARCH64_TLSDESC_CALL]
1341   //   blr     x1
1342   // And it can be optimized to:
1343   //   movz    x0, #0x0, lsl #16
1344   //   movk    x0, #0x10
1345   //   nop
1346   //   nop
1347   checkUInt<32>(Val, Type);
1348 
1349   switch (Type) {
1350   case R_AARCH64_TLSDESC_ADD_LO12_NC:
1351   case R_AARCH64_TLSDESC_CALL:
1352     write32le(Loc, 0xd503201f); // nop
1353     return;
1354   case R_AARCH64_TLSDESC_ADR_PAGE21:
1355     write32le(Loc, 0xd2a00000 | (((Val >> 16) & 0xffff) << 5)); // movz
1356     return;
1357   case R_AARCH64_TLSDESC_LD64_LO12_NC:
1358     write32le(Loc, 0xf2800000 | ((Val & 0xffff) << 5)); // movk
1359     return;
1360   default:
1361     llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
1362   }
1363 }
1364 
1365 void AArch64TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
1366                                        uint64_t Val) const {
1367   // A TLSDESC Global-Dynamic relocation sequence has the form:
1368   //   adrp    x0, :tlsdesc:v              [R_AARCH64_TLSDESC_ADR_PAGE21]
1369   //   ldr     x1, [x0, #:tlsdesc_lo12:v]  [R_AARCH64_TLSDESC_LD64_LO12_NC]
1370   //   add     x0, x0, :tlsdesc_lo12:v     [R_AARCH64_TLSDESC_ADD_LO12_NC]
1371   //   .tlsdesccall                        [R_AARCH64_TLSDESC_CALL]
1372   //   blr     x1
1373   // And it can be optimized to:
1374   //   adrp    x0, :gottprel:v
1375   //   ldr     x0, [x0, :gottprel_lo12:v]
1376   //   nop
1377   //   nop
1378 
1379   switch (Type) {
1380   case R_AARCH64_TLSDESC_ADD_LO12_NC:
1381   case R_AARCH64_TLSDESC_CALL:
1382     write32le(Loc, 0xd503201f); // nop
1383     break;
1384   case R_AARCH64_TLSDESC_ADR_PAGE21:
1385     write32le(Loc, 0x90000000); // adrp
1386     relocateOne(Loc, R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, Val);
1387     break;
1388   case R_AARCH64_TLSDESC_LD64_LO12_NC:
1389     write32le(Loc, 0xf9400000); // ldr
1390     relocateOne(Loc, R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, Val);
1391     break;
1392   default:
1393     llvm_unreachable("unsupported relocation for TLS GD to IE relaxation");
1394   }
1395 }
1396 
1397 void AArch64TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
1398                                        uint64_t Val) const {
1399   checkUInt<32>(Val, Type);
1400 
1401   if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
1402     // Generate MOVZ.
1403     uint32_t RegNo = read32le(Loc) & 0x1f;
1404     write32le(Loc, (0xd2a00000 | RegNo) | (((Val >> 16) & 0xffff) << 5));
1405     return;
1406   }
1407   if (Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) {
1408     // Generate MOVK.
1409     uint32_t RegNo = read32le(Loc) & 0x1f;
1410     write32le(Loc, (0xf2800000 | RegNo) | ((Val & 0xffff) << 5));
1411     return;
1412   }
1413   llvm_unreachable("invalid relocation for TLS IE to LE relaxation");
1414 }
1415 
1416 void AMDGPUTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
1417                                    uint64_t Val) const {
1418   assert(Type == R_AMDGPU_REL32);
1419   write32le(Loc, Val);
1420 }
1421 
1422 RelExpr AMDGPUTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
1423   if (Type != R_AMDGPU_REL32)
1424     error("do not know how to handle relocation");
1425   return R_PC;
1426 }
1427 
1428 ARMTargetInfo::ARMTargetInfo() {
1429   CopyRel = R_ARM_COPY;
1430   RelativeRel = R_ARM_RELATIVE;
1431   IRelativeRel = R_ARM_IRELATIVE;
1432   GotRel = R_ARM_GLOB_DAT;
1433   PltRel = R_ARM_JUMP_SLOT;
1434   TlsGotRel = R_ARM_TLS_TPOFF32;
1435   TlsModuleIndexRel = R_ARM_TLS_DTPMOD32;
1436   TlsOffsetRel = R_ARM_TLS_DTPOFF32;
1437   PltEntrySize = 16;
1438   PltHeaderSize = 20;
1439 }
1440 
1441 RelExpr ARMTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
1442   switch (Type) {
1443   default:
1444     return R_ABS;
1445   case R_ARM_THM_JUMP11:
1446     return R_PC;
1447   case R_ARM_CALL:
1448   case R_ARM_JUMP24:
1449   case R_ARM_PC24:
1450   case R_ARM_PLT32:
1451   case R_ARM_THM_JUMP19:
1452   case R_ARM_THM_JUMP24:
1453   case R_ARM_THM_CALL:
1454     return R_PLT_PC;
1455   case R_ARM_GOTOFF32:
1456     // (S + A) - GOT_ORG
1457     return R_GOTREL;
1458   case R_ARM_GOT_BREL:
1459     // GOT(S) + A - GOT_ORG
1460     return R_GOT_OFF;
1461   case R_ARM_GOT_PREL:
1462     // GOT(S) + A - P
1463     return R_GOT_PC;
1464   case R_ARM_BASE_PREL:
1465     // B(S) + A - P
1466     // FIXME: currently B(S) is assumed to be .got; this may not hold for all
1467     // platforms.
1468     return R_GOTONLY_PC;
1469   case R_ARM_PREL31:
1470   case R_ARM_REL32:
1471     return R_PC;
1472   }
1473 }
1474 
1475 uint32_t ARMTargetInfo::getDynRel(uint32_t Type) const {
1476   if (Type == R_ARM_ABS32)
1477     return Type;
1478   // Keep it going with a dummy value so that we can find more reloc errors.
1479   errorDynRel(Type);
1480   return R_ARM_ABS32;
1481 }
1482 
1483 void ARMTargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
1484   write32le(Buf, Out<ELF32LE>::Plt->getVA());
1485 }
1486 
1487 void ARMTargetInfo::writePltHeader(uint8_t *Buf) const {
1488   const uint8_t PltData[] = {
1489       0x04, 0xe0, 0x2d, 0xe5, //     str lr, [sp,#-4]!
1490       0x04, 0xe0, 0x9f, 0xe5, //     ldr lr, L2
1491       0x0e, 0xe0, 0x8f, 0xe0, // L1: add lr, pc, lr
1492       0x08, 0xf0, 0xbe, 0xe5, //     ldr pc, [lr, #8]
1493       0x00, 0x00, 0x00, 0x00, // L2: .word   &(.got.plt) - L1 - 8
1494   };
1495   memcpy(Buf, PltData, sizeof(PltData));
1496   uint64_t GotPlt = Out<ELF32LE>::GotPlt->getVA();
1497   uint64_t L1 = Out<ELF32LE>::Plt->getVA() + 8;
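  // Reading pc in "add lr, pc, lr" yields the address of that instruction
  // plus 8, so the literal stored at L2 must be the distance from L1 + 8 to
  // the start of .got.plt.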
1498   write32le(Buf + 16, GotPlt - L1 - 8);
1499 }
1500 
1501 void ARMTargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
1502                              uint64_t PltEntryAddr, int32_t Index,
1503                              unsigned RelOff) const {
1504   // FIXME: This uses a simple code sequence with simple relocations.
1505   // A more compact sequence exists, but it requires support for the group
1506   // relocations. See "ELF for the ARM Architecture", Appendix A.3.
1507   const uint8_t PltData[] = {
1508       0x04, 0xc0, 0x9f, 0xe5, //     ldr ip, L2
1509       0x0f, 0xc0, 0x8c, 0xe0, // L1: add ip, ip, pc
1510       0x00, 0xf0, 0x9c, 0xe5, //     ldr pc, [ip]
1511       0x00, 0x00, 0x00, 0x00, // L2: .word   &(.got.plt entry) - L1 - 8
1512   };
1513   memcpy(Buf, PltData, sizeof(PltData));
1514   uint64_t L1 = PltEntryAddr + 4;
1515   write32le(Buf + 12, GotEntryAddr - L1 - 8);
1516 }
1517 
1518 void ARMTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
1519                                 uint64_t Val) const {
1520   switch (Type) {
1521   case R_ARM_NONE:
1522     break;
1523   case R_ARM_ABS32:
1524   case R_ARM_BASE_PREL:
1525   case R_ARM_GOTOFF32:
1526   case R_ARM_GOT_BREL:
1527   case R_ARM_GOT_PREL:
1528   case R_ARM_REL32:
1529     write32le(Loc, Val);
1530     break;
1531   case R_ARM_PREL31:
1532     checkInt<31>(Val, Type);
1533     write32le(Loc, (read32le(Loc) & 0x80000000) | (Val & ~0x80000000));
1534     break;
1535   case R_ARM_CALL:
1536     // R_ARM_CALL is used for both BL and BLX instructions. Depending on the
1537     // value of bit 0 of Val, we must select either a BL or a BLX instruction.
1538     if (Val & 1) {
1539       // If bit 0 of Val is 1, the target is Thumb and we must select a BLX.
1540       // The BLX encoding is 0xfa:H:imm24 where Val = imm24:H:'1'
1541       checkInt<26>(Val, Type);
1542       write32le(Loc, 0xfa000000 |                    // opcode
1543                          ((Val & 2) << 23) |         // H
1544                          ((Val >> 2) & 0x00ffffff)); // imm24
1545       break;
1546     }
1547     if ((read32le(Loc) & 0xfe000000) == 0xfa000000)
1548       // The instruction is a BLX (always unconditional) to an ARM target;
1549       // select an unconditional BL instead.
1550       write32le(Loc, 0xeb000000 | (read32le(Loc) & 0x00ffffff));
1551     // fall through as BL encoding is shared with B
1552   case R_ARM_JUMP24:
1553   case R_ARM_PC24:
1554   case R_ARM_PLT32:
1555     checkInt<26>(Val, Type);
1556     write32le(Loc, (read32le(Loc) & ~0x00ffffff) | ((Val >> 2) & 0x00ffffff));
1557     break;
1558   case R_ARM_THM_JUMP11:
1559     checkInt<12>(Val, Type);
1560     write16le(Loc, (read32le(Loc) & 0xf800) | ((Val >> 1) & 0x07ff));
1561     break;
1562   case R_ARM_THM_JUMP19:
1563     // Encoding T3: Val = S:J2:J1:imm6:imm11:0
1564     checkInt<21>(Val, Type);
1565     write16le(Loc,
1566               (read16le(Loc) & 0xfbc0) |   // opcode cond
1567                   ((Val >> 10) & 0x0400) | // S
1568                   ((Val >> 12) & 0x003f)); // imm6
1569     write16le(Loc + 2,
1570               0x8000 |                    // opcode
1571                   ((Val >> 8) & 0x0800) | // J2
1572                   ((Val >> 5) & 0x2000) | // J1
1573                   ((Val >> 1) & 0x07ff)); // imm11
1574     break;
1575   case R_ARM_THM_CALL:
1576     // R_ARM_THM_CALL is used for both BL and BLX instructions. Depending on
1577     // the value of bit 0 of Val, we must select either a BL or a BLX instruction.
1578     if ((Val & 1) == 0) {
1579       // Ensure the BLX destination is 4-byte aligned; the BLX instruction
1580       // itself may be only 2-byte aligned. Do this before the overflow check.
1581       Val = alignTo(Val, 4);
1582     }
1583     // Bit 12 is 0 for BLX, 1 for BL
1584     write16le(Loc + 2, (read16le(Loc + 2) & ~0x1000) | (Val & 1) << 12);
1585     // Fall through as rest of encoding is the same as B.W
1586   case R_ARM_THM_JUMP24:
1587     // Encoding B  T4, BL T1, BLX T2: Val = S:I1:I2:imm10:imm11:0
1588     // FIXME: Use of I1 and I2 requires v6T2ops
1589     checkInt<25>(Val, Type);
1590     write16le(Loc,
1591               0xf000 |                     // opcode
1592                   ((Val >> 14) & 0x0400) | // S
1593                   ((Val >> 12) & 0x03ff)); // imm10
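    // J1 = NOT(I1) EOR S and J2 = NOT(I2) EOR S, where S is bit 24 of Val and
    // I1/I2 are bits 23/22; the XOR terms below compute exactly that.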
1594     write16le(Loc + 2,
1595               (read16le(Loc + 2) & 0xd000) |                  // opcode
1596                   (((~(Val >> 10)) ^ (Val >> 11)) & 0x2000) | // J1
1597                   (((~(Val >> 11)) ^ (Val >> 13)) & 0x0800) | // J2
1598                   ((Val >> 1) & 0x07ff));                     // imm11
1599     break;
1600   case R_ARM_MOVW_ABS_NC:
1601     write32le(Loc, (read32le(Loc) & ~0x000f0fff) | ((Val & 0xf000) << 4) |
1602                        (Val & 0x0fff));
1603     break;
1604   case R_ARM_MOVT_ABS:
1605     checkUInt<32>(Val, Type);
1606     write32le(Loc, (read32le(Loc) & ~0x000f0fff) |
1607                        (((Val >> 16) & 0xf000) << 4) | ((Val >> 16) & 0xfff));
1608     break;
1609   case R_ARM_THM_MOVT_ABS:
1610     // Encoding T1: A = imm4:i:imm3:imm8
1611     checkUInt<32>(Val, Type);
1612     write16le(Loc,
1613               0xf2c0 |                     // opcode
1614                   ((Val >> 17) & 0x0400) | // i
1615                   ((Val >> 28) & 0x000f)); // imm4
1616     write16le(Loc + 2,
1617               (read16le(Loc + 2) & 0x8f00) | // opcode
1618                   ((Val >> 12) & 0x7000) |   // imm3
1619                   ((Val >> 16) & 0x00ff));   // imm8
1620     break;
1621   case R_ARM_THM_MOVW_ABS_NC:
1622     // Encoding T3: A = imm4:i:imm3:imm8
1623     write16le(Loc,
1624               0xf240 |                     // opcode
1625                   ((Val >> 1) & 0x0400) |  // i
1626                   ((Val >> 12) & 0x000f)); // imm4
1627     write16le(Loc + 2,
1628               (read16le(Loc + 2) & 0x8f00) | // opcode
1629                   ((Val << 4) & 0x7000) |    // imm3
1630                   (Val & 0x00ff));           // imm8
1631     break;
1632   default:
1633     fatal("unrecognized reloc " + Twine(Type));
1634   }
1635 }
1636 
1637 uint64_t ARMTargetInfo::getImplicitAddend(const uint8_t *Buf,
1638                                           uint32_t Type) const {
1639   switch (Type) {
1640   default:
1641     return 0;
1642   case R_ARM_ABS32:
1643   case R_ARM_BASE_PREL:
1644   case R_ARM_GOTOFF32:
1645   case R_ARM_GOT_BREL:
1646   case R_ARM_GOT_PREL:
1647   case R_ARM_REL32:
1648     return SignExtend64<32>(read32le(Buf));
1649   case R_ARM_PREL31:
1650     return SignExtend64<31>(read32le(Buf));
1651   case R_ARM_CALL:
1652   case R_ARM_JUMP24:
1653   case R_ARM_PC24:
1654   case R_ARM_PLT32:
1655     return SignExtend64<26>(read32le(Buf) << 2);
1656   case R_ARM_THM_JUMP11:
1657     return SignExtend64<12>(read16le(Buf) << 1);
1658   case R_ARM_THM_JUMP19: {
1659     // Encoding T3: A = S:J2:J1:imm6:imm11:0
1660     uint16_t Hi = read16le(Buf);
1661     uint16_t Lo = read16le(Buf + 2);
1662     return SignExtend64<20>(((Hi & 0x0400) << 10) | // S
1663                             ((Lo & 0x0800) << 8) |  // J2
1664                             ((Lo & 0x2000) << 5) |  // J1
1665                             ((Hi & 0x003f) << 12) | // imm6
1666                             ((Lo & 0x07ff) << 1));  // imm11:0
1667   }
1668   case R_ARM_THM_JUMP24:
1669   case R_ARM_THM_CALL: {
1670     // Encoding B T4, BL T1, BLX T2: A = S:I1:I2:imm10:imm11:0
1671     // I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S)
1672     // FIXME: I1 and I2 require v6T2ops
1673     uint16_t Hi = read16le(Buf);
1674     uint16_t Lo = read16le(Buf + 2);
1675     return SignExtend64<24>(((Hi & 0x0400) << 14) |                    // S
1676                             (~((Lo ^ (Hi << 3)) << 10) & 0x00800000) | // I1
1677                             (~((Lo ^ (Hi << 1)) << 11) & 0x00400000) | // I2
1678                             ((Hi & 0x003ff) << 12) |                   // imm10
1679                             ((Lo & 0x007ff) << 1)); // imm11:0
1680   }
1681   // Per "ELF for the ARM Architecture" 4.6.1.1, the implicit addend for MOVW
1682   // and MOVT is in the range -32768 <= A < 32768.
1683   case R_ARM_MOVW_ABS_NC:
1684   case R_ARM_MOVT_ABS: {
1685     uint64_t Val = read32le(Buf) & 0x000f0fff;
1686     return SignExtend64<16>(((Val & 0x000f0000) >> 4) | (Val & 0x00fff));
1687   }
1688   case R_ARM_THM_MOVW_ABS_NC:
1689   case R_ARM_THM_MOVT_ABS: {
1690     // Encoding T3: A = imm4:i:imm3:imm8
1691     uint16_t Hi = read16le(Buf);
1692     uint16_t Lo = read16le(Buf + 2);
1693     return SignExtend64<16>(((Hi & 0x000f) << 12) | // imm4
1694                             ((Hi & 0x0400) << 1) |  // i
1695                             ((Lo & 0x7000) >> 4) |  // imm3
1696                             (Lo & 0x00ff));         // imm8
1697   }
1698   }
1699 }
1700 
1701 template <class ELFT> MipsTargetInfo<ELFT>::MipsTargetInfo() {
1702   GotPltHeaderEntriesNum = 2;
1703   PageSize = 65536;
1704   PltEntrySize = 16;
1705   PltHeaderSize = 32;
1706   ThunkSize = 16;
1707   CopyRel = R_MIPS_COPY;
1708   PltRel = R_MIPS_JUMP_SLOT;
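  // The MIPS N64 ABI packs up to three relocation types into a single record
  // (see calculateMips64RelChain below), so on 64-bit targets the dynamic
  // relative relocation is the composed pair R_MIPS_REL32 / R_MIPS_64.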
1709   if (ELFT::Is64Bits)
1710     RelativeRel = (R_MIPS_64 << 8) | R_MIPS_REL32;
1711   else
1712     RelativeRel = R_MIPS_REL32;
1713 }
1714 
1715 template <class ELFT>
1716 RelExpr MipsTargetInfo<ELFT>::getRelExpr(uint32_t Type,
1717                                          const SymbolBody &S) const {
1718   if (ELFT::Is64Bits)
1719     // See the comment in calculateMips64RelChain.
1720     Type &= 0xff;
1721   switch (Type) {
1722   default:
1723     return R_ABS;
1724   case R_MIPS_JALR:
1725     return R_HINT;
1726   case R_MIPS_GPREL16:
1727   case R_MIPS_GPREL32:
1728     return R_GOTREL;
1729   case R_MIPS_26:
1730     return R_PLT;
1731   case R_MIPS_HI16:
1732   case R_MIPS_LO16:
1733   case R_MIPS_GOT_OFST:
1734     // The MIPS _gp_disp symbol designates the offset between the start of a
1735     // function and the 'gp' pointer into the GOT. __gnu_local_gp is equal to
1736     // the current value of 'gp'. Therefore relocations against these symbols
1737     // do not require a dynamic relocation.
1738     if (&S == ElfSym<ELFT>::MipsGpDisp)
1739       return R_PC;
1740     return R_ABS;
1741   case R_MIPS_PC32:
1742   case R_MIPS_PC16:
1743   case R_MIPS_PC19_S2:
1744   case R_MIPS_PC21_S2:
1745   case R_MIPS_PC26_S2:
1746   case R_MIPS_PCHI16:
1747   case R_MIPS_PCLO16:
1748     return R_PC;
1749   case R_MIPS_GOT16:
1750     if (S.isLocal())
1751       return R_MIPS_GOT_LOCAL_PAGE;
1752   // fallthrough
1753   case R_MIPS_CALL16:
1754   case R_MIPS_GOT_DISP:
1755     return R_MIPS_GOT_OFF;
1756   case R_MIPS_GOT_PAGE:
1757     return R_MIPS_GOT_LOCAL_PAGE;
1758   }
1759 }
1760 
1761 template <class ELFT>
1762 uint32_t MipsTargetInfo<ELFT>::getDynRel(uint32_t Type) const {
1763   if (Type == R_MIPS_32 || Type == R_MIPS_64)
1764     return RelativeRel;
1765   // Keep it going with a dummy value so that we can find more reloc errors.
1766   errorDynRel(Type);
1767   return R_MIPS_32;
1768 }
1769 
1770 template <class ELFT>
1771 void MipsTargetInfo<ELFT>::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
1772   write32<ELFT::TargetEndianness>(Buf, Out<ELFT>::Plt->getVA());
1773 }
1774 
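// Compute the %hi(V) value for a paired %hi/%lo relocation. The +0x8000
// compensates for the sign extension of the 16-bit %lo part. For example,
// for V = 0x18000 the %lo part 0x8000 sign-extends to -0x8000, so %hi must
// be 0x2 in order that (0x2 << 16) + (-0x8000) == 0x18000.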
1775 static uint16_t mipsHigh(uint64_t V) { return (V + 0x8000) >> 16; }
1776 
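// Extract the addend of a PC-relative relocation whose field occupies the low
// BSIZE bits of the instruction and is scaled by 1 << SHIFT. For R_MIPS_PC16
// (BSIZE = 16, SHIFT = 2), for instance, the stored 16-bit word offset is
// shifted left by 2 and sign-extended from 18 bits.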
1777 template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
1778 static int64_t getPcRelocAddend(const uint8_t *Loc) {
1779   uint32_t Instr = read32<E>(Loc);
1780   uint32_t Mask = 0xffffffff >> (32 - BSIZE);
1781   return SignExtend64<BSIZE + SHIFT>((Instr & Mask) << SHIFT);
1782 }
1783 
1784 template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
1785 static void applyMipsPcReloc(uint8_t *Loc, uint32_t Type, uint64_t V) {
1786   uint32_t Mask = 0xffffffff >> (32 - BSIZE);
1787   uint32_t Instr = read32<E>(Loc);
1788   if (SHIFT > 0)
1789     checkAlignment<(1 << SHIFT)>(V, Type);
1790   checkInt<BSIZE + SHIFT>(V, Type);
1791   write32<E>(Loc, (Instr & ~Mask) | ((V >> SHIFT) & Mask));
1792 }
1793 
1794 template <endianness E>
1795 static void writeMipsHi16(uint8_t *Loc, uint64_t V) {
1796   uint32_t Instr = read32<E>(Loc);
1797   write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(V));
1798 }
1799 
1800 template <endianness E>
1801 static void writeMipsLo16(uint8_t *Loc, uint64_t V) {
1802   uint32_t Instr = read32<E>(Loc);
1803   write32<E>(Loc, (Instr & 0xffff0000) | (V & 0xffff));
1804 }
1805 
1806 template <class ELFT>
1807 void MipsTargetInfo<ELFT>::writePltHeader(uint8_t *Buf) const {
1808   const endianness E = ELFT::TargetEndianness;
1809   write32<E>(Buf, 0x3c1c0000);      // lui   $28, %hi(&GOTPLT[0])
1810   write32<E>(Buf + 4, 0x8f990000);  // lw    $25, %lo(&GOTPLT[0])($28)
1811   write32<E>(Buf + 8, 0x279c0000);  // addiu $28, $28, %lo(&GOTPLT[0])
1812   write32<E>(Buf + 12, 0x031cc023); // subu  $24, $24, $28
1813   write32<E>(Buf + 16, 0x03e07825); // move  $15, $31
1814   write32<E>(Buf + 20, 0x0018c082); // srl   $24, $24, 2
1815   write32<E>(Buf + 24, 0x0320f809); // jalr  $25
1816   write32<E>(Buf + 28, 0x2718fffe); // subu  $24, $24, 2
1817   uint64_t Got = Out<ELFT>::GotPlt->getVA();
1818   writeMipsHi16<E>(Buf, Got);
1819   writeMipsLo16<E>(Buf + 4, Got);
1820   writeMipsLo16<E>(Buf + 8, Got);
1821 }
1822 
1823 template <class ELFT>
1824 void MipsTargetInfo<ELFT>::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
1825                                     uint64_t PltEntryAddr, int32_t Index,
1826                                     unsigned RelOff) const {
1827   const endianness E = ELFT::TargetEndianness;
1828   write32<E>(Buf, 0x3c0f0000);      // lui   $15, %hi(.got.plt entry)
1829   write32<E>(Buf + 4, 0x8df90000);  // l[wd] $25, %lo(.got.plt entry)($15)
1830   write32<E>(Buf + 8, 0x03200008);  // jr    $25
1831   write32<E>(Buf + 12, 0x25f80000); // addiu $24, $15, %lo(.got.plt entry)
1832   writeMipsHi16<E>(Buf, GotEntryAddr);
1833   writeMipsLo16<E>(Buf + 4, GotEntryAddr);
1834   writeMipsLo16<E>(Buf + 12, GotEntryAddr);
1835 }
1836 
1837 template <class ELFT>
1838 void MipsTargetInfo<ELFT>::writeThunk(uint8_t *Buf, uint64_t S) const {
1839   // Write MIPS LA25 thunk code used to call a PIC function from non-PIC code.
1840   // See MipsTargetInfo::needsThunk for when such a thunk is required.
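  // Note that the "j" instruction encodes only bits 27..2 of the target, so
  // this sequence assumes the thunk and the callee reside in the same 256 MB
  // region; the %hi/%lo pair below materializes the full address in $25.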
1841   const endianness E = ELFT::TargetEndianness;
1842   write32<E>(Buf, 0x3c190000);                // lui   $25, %hi(func)
1843   write32<E>(Buf + 4, 0x08000000 | (S >> 2)); // j     func
1844   write32<E>(Buf + 8, 0x27390000);            // addiu $25, $25, %lo(func)
1845   write32<E>(Buf + 12, 0x00000000);           // nop
1846   writeMipsHi16<E>(Buf, S);
1847   writeMipsLo16<E>(Buf + 8, S);
1848 }
1849 
1850 template <class ELFT>
1851 bool MipsTargetInfo<ELFT>::needsThunk(uint32_t Type, const InputFile &File,
1852                                       const SymbolBody &S) const {
1853   // Any MIPS PIC code function is invoked with its address in register $t9.
1854   // So if we have a branch instruction from non-PIC code to a PIC function,
1855   // we cannot make the jump directly and need to create a small stub
1856   // that sets up the target function address in $t9.
1857   // See page 3-38 ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
1858   if (Type != R_MIPS_26)
1859     return false;
1860   auto *F = dyn_cast<ELFFileBase<ELFT>>(&File);
1861   if (!F)
1862     return false;
1863   // If the current file contains PIC code, an LA25 stub is not required.
1864   if (F->getObj().getHeader()->e_flags & EF_MIPS_PIC)
1865     return false;
1866   auto *D = dyn_cast<DefinedRegular<ELFT>>(&S);
1867   if (!D || !D->Section)
1868     return false;
1869   // An LA25 stub is required if the target file contains PIC code
1870   // or the target symbol is a PIC symbol.
1871   return (D->Section->getFile()->getObj().getHeader()->e_flags & EF_MIPS_PIC) ||
1872          (D->StOther & STO_MIPS_MIPS16) == STO_MIPS_PIC;
1873 }
1874 
1875 template <class ELFT>
1876 uint64_t MipsTargetInfo<ELFT>::getImplicitAddend(const uint8_t *Buf,
1877                                                  uint32_t Type) const {
1878   const endianness E = ELFT::TargetEndianness;
1879   switch (Type) {
1880   default:
1881     return 0;
1882   case R_MIPS_32:
1883   case R_MIPS_GPREL32:
1884     return read32<E>(Buf);
1885   case R_MIPS_26:
1886     // FIXME (simon): If the relocation target symbol is not a PLT entry
1887     // we should use another expression for calculation:
1888     // ((A << 2) | (P & 0xf0000000)) >> 2
1889     return SignExtend64<28>(read32<E>(Buf) << 2);
1890   case R_MIPS_GPREL16:
1891   case R_MIPS_LO16:
1892   case R_MIPS_PCLO16:
1893   case R_MIPS_TLS_DTPREL_HI16:
1894   case R_MIPS_TLS_DTPREL_LO16:
1895   case R_MIPS_TLS_TPREL_HI16:
1896   case R_MIPS_TLS_TPREL_LO16:
1897     return SignExtend64<16>(read32<E>(Buf));
1898   case R_MIPS_PC16:
1899     return getPcRelocAddend<E, 16, 2>(Buf);
1900   case R_MIPS_PC19_S2:
1901     return getPcRelocAddend<E, 19, 2>(Buf);
1902   case R_MIPS_PC21_S2:
1903     return getPcRelocAddend<E, 21, 2>(Buf);
1904   case R_MIPS_PC26_S2:
1905     return getPcRelocAddend<E, 26, 2>(Buf);
1906   case R_MIPS_PC32:
1907     return getPcRelocAddend<E, 32, 0>(Buf);
1908   }
1909 }
1910 
1911 static std::pair<uint32_t, uint64_t> calculateMips64RelChain(uint32_t Type,
1912                                                              uint64_t Val) {
1913   // The MIPS N64 ABI packs up to three relocations into a single relocation
1914   // record. In general, each of the three can have an arbitrary type. In
1915   // practice, Clang and GCC use only a few combinations. For now, we
1916   // support two of them, which is enough to pass at least all LLVM test
1917   // suite cases:
1918   //   <any relocation> / R_MIPS_SUB / R_MIPS_HI16 | R_MIPS_LO16
1919   //   <any relocation> / R_MIPS_64 / R_MIPS_NONE
1920   // The first relocation is a 'real' relocation which is calculated using
1921   // the corresponding symbol's value. The second and the third relocations
1922   // modify the result of the first one: extend it to 64 bits, extract the
1923   // high or low part, etc. For details, see section 2.9 "Relocation" in
1924   // https://dmz-portal.mips.com/mw/images/8/82/007-4658-001.pdf
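  // For example, the N64 gp-setup sequence uses a packed chain such as
  // R_MIPS_GPREL16 / R_MIPS_SUB / R_MIPS_HI16 (GCC's %hi(%neg(%gp_rel(sym)))),
  // which this function resolves to applying R_MIPS_HI16 to the negated value.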
1925   uint32_t Type2 = (Type >> 8) & 0xff;
1926   uint32_t Type3 = (Type >> 16) & 0xff;
1927   if (Type2 == R_MIPS_NONE && Type3 == R_MIPS_NONE)
1928     return std::make_pair(Type, Val);
1929   if (Type2 == R_MIPS_64 && Type3 == R_MIPS_NONE)
1930     return std::make_pair(Type2, Val);
1931   if (Type2 == R_MIPS_SUB && (Type3 == R_MIPS_HI16 || Type3 == R_MIPS_LO16))
1932     return std::make_pair(Type3, -Val);
1933   error("unsupported relocation combination " + Twine(Type));
1934   return std::make_pair(Type & 0xff, Val);
1935 }
1936 
1937 template <class ELFT>
1938 void MipsTargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint32_t Type,
1939                                        uint64_t Val) const {
1940   const endianness E = ELFT::TargetEndianness;
1941   // Thread pointer and DTP offsets from the start of the TLS data area.
1942   // https://www.linux-mips.org/wiki/NPTL
1943   if (Type == R_MIPS_TLS_DTPREL_HI16 || Type == R_MIPS_TLS_DTPREL_LO16)
1944     Val -= 0x8000;
1945   else if (Type == R_MIPS_TLS_TPREL_HI16 || Type == R_MIPS_TLS_TPREL_LO16)
1946     Val -= 0x7000;
1947   if (ELFT::Is64Bits)
1948     std::tie(Type, Val) = calculateMips64RelChain(Type, Val);
1949   switch (Type) {
1950   case R_MIPS_32:
1951   case R_MIPS_GPREL32:
1952     write32<E>(Loc, Val);
1953     break;
1954   case R_MIPS_64:
1955     write64<E>(Loc, Val);
1956     break;
1957   case R_MIPS_26:
1958     write32<E>(Loc, (read32<E>(Loc) & ~0x3ffffff) | (Val >> 2));
1959     break;
1960   case R_MIPS_GOT_DISP:
1961   case R_MIPS_GOT_PAGE:
1962   case R_MIPS_GOT16:
1963   case R_MIPS_GPREL16:
1964     checkInt<16>(Val, Type);
1965   // fallthrough
1966   case R_MIPS_CALL16:
1967   case R_MIPS_GOT_OFST:
1968   case R_MIPS_LO16:
1969   case R_MIPS_PCLO16:
1970   case R_MIPS_TLS_DTPREL_LO16:
1971   case R_MIPS_TLS_TPREL_LO16:
1972     writeMipsLo16<E>(Loc, Val);
1973     break;
1974   case R_MIPS_HI16:
1975   case R_MIPS_PCHI16:
1976   case R_MIPS_TLS_DTPREL_HI16:
1977   case R_MIPS_TLS_TPREL_HI16:
1978     writeMipsHi16<E>(Loc, Val);
1979     break;
1980   case R_MIPS_JALR:
1981     // Ignore this optimization relocation for now
1982     break;
1983   case R_MIPS_PC16:
1984     applyMipsPcReloc<E, 16, 2>(Loc, Type, Val);
1985     break;
1986   case R_MIPS_PC19_S2:
1987     applyMipsPcReloc<E, 19, 2>(Loc, Type, Val);
1988     break;
1989   case R_MIPS_PC21_S2:
1990     applyMipsPcReloc<E, 21, 2>(Loc, Type, Val);
1991     break;
1992   case R_MIPS_PC26_S2:
1993     applyMipsPcReloc<E, 26, 2>(Loc, Type, Val);
1994     break;
1995   case R_MIPS_PC32:
1996     applyMipsPcReloc<E, 32, 0>(Loc, Type, Val);
1997     break;
1998   default:
1999     fatal("unrecognized reloc " + Twine(Type));
2000   }
2001 }
2002 
2003 template <class ELFT>
2004 bool MipsTargetInfo<ELFT>::usesOnlyLowPageBits(uint32_t Type) const {
2005   return Type == R_MIPS_LO16 || Type == R_MIPS_GOT_OFST;
2006 }
2007 }
2008 }
2009