xref: /llvm-project-15.0.7/lld/ELF/Target.cpp (revision e46c0885)
1 //===- Target.cpp ---------------------------------------------------------===//
2 //
3 //                             The LLVM Linker
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Machine-specific things, such as applying relocations, creation of
11 // GOT or PLT entries, etc., are handled in this file.
12 //
// Refer to the ELF spec for the single-letter variables, S, A or P, used
14 // in this file.
15 //
// Some functions defined in this file have "relaxTls" as part of their names.
17 // They do peephole optimization for TLS variables by rewriting instructions.
18 // They are not part of the ABI but optional optimization, so you can skip
19 // them if you are not interested in how TLS variables are optimized.
20 // See the following paper for the details.
21 //
22 //   Ulrich Drepper, ELF Handling For Thread-Local Storage
23 //   http://www.akkadia.org/drepper/tls.pdf
24 //
25 //===----------------------------------------------------------------------===//
26 
27 #include "Target.h"
28 #include "Error.h"
29 #include "InputFiles.h"
30 #include "OutputSections.h"
31 #include "Symbols.h"
32 
33 #include "llvm/ADT/ArrayRef.h"
34 #include "llvm/Object/ELF.h"
35 #include "llvm/Support/Endian.h"
36 #include "llvm/Support/ELF.h"
37 
38 using namespace llvm;
39 using namespace llvm::object;
40 using namespace llvm::support::endian;
41 using namespace llvm::ELF;
42 
43 namespace lld {
44 namespace elf {
45 
// The TargetInfo instance for the machine being targeted; see createTarget().
TargetInfo *Target;
47 
48 static void or32le(uint8_t *P, int32_t V) { write32le(P, read32le(P) | V); }
49 
50 StringRef getRelName(uint32_t Type) {
51   return getELFRelocationTypeName(Config->EMachine, Type);
52 }
53 
54 template <unsigned N> static void checkInt(int64_t V, uint32_t Type) {
55   if (!isInt<N>(V))
56     error("relocation " + getRelName(Type) + " out of range");
57 }
58 
59 template <unsigned N> static void checkUInt(uint64_t V, uint32_t Type) {
60   if (!isUInt<N>(V))
61     error("relocation " + getRelName(Type) + " out of range");
62 }
63 
64 template <unsigned N> static void checkIntUInt(uint64_t V, uint32_t Type) {
65   if (!isInt<N>(V) && !isUInt<N>(V))
66     error("relocation " + getRelName(Type) + " out of range");
67 }
68 
69 template <unsigned N> static void checkAlignment(uint64_t V, uint32_t Type) {
70   if ((V & (N - 1)) != 0)
71     error("improper alignment for relocation " + getRelName(Type));
72 }
73 
// Report a relocation type that cannot be expressed as a dynamic
// relocation in position-independent output.
static void errorDynRel(uint32_t Type) {
  error("relocation " + getRelName(Type) +
        " cannot be used against shared object; recompile with -fPIC.");
}
78 
namespace {
// i386 (EM_386): full PLT/GOT support plus all four TLS relaxations
// (GD->IE, GD->LE, IE->LE, LD->LE).
class X86TargetInfo final : public TargetInfo {
public:
  X86TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
  void writeGotPltHeader(uint8_t *Buf) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;

  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                          RelExpr Expr) const override;
  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

// x86-64 (EM_X86_64): PLT/GOT support, GOT-load relaxation (GOTPCRELX)
// and all four TLS relaxations.
class X86_64TargetInfo final : public TargetInfo {
public:
  X86_64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPltHeader(uint8_t *Buf) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;

  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                          RelExpr Expr) const override;
  void relaxGot(uint8_t *Loc, uint64_t Val) const override;
  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;

private:
  // Shared helper for the relaxations that are legal only in non-PIC output.
  void relaxGotNoPic(uint8_t *Loc, uint64_t Val, uint8_t Op,
                     uint8_t ModRm) const;
};

// 32-bit PowerPC: static relocation processing only.
class PPCTargetInfo final : public TargetInfo {
public:
  PPCTargetInfo();
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
};

// 64-bit PowerPC: static relocations plus PLT entry generation.
class PPC64TargetInfo final : public TargetInfo {
public:
  PPC64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

// AArch64: PLT/GOT support, page-granular relocation handling
// (usesOnlyLowPageBits) and TLS relaxations except LD->LE.
class AArch64TargetInfo final : public TargetInfo {
public:
  AArch64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  bool usesOnlyLowPageBits(uint32_t Type) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                          RelExpr Expr) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

// AMDGPU: minimal target, static relocation processing only.
class AMDGPUTargetInfo final : public TargetInfo {
public:
  AMDGPUTargetInfo() {}
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
};

// 32-bit ARM: PLT/GOT support; no TLS relaxation overrides.
class ARMTargetInfo final : public TargetInfo {
public:
  ARMTargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  uint64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

// MIPS, templated over the four ELF kinds (32/64-bit, LE/BE). Adds thunk
// generation (needsThunk/writeThunk) on top of PLT/GOT support.
template <class ELFT> class MipsTargetInfo final : public TargetInfo {
public:
  MipsTargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void writeThunk(uint8_t *Buf, uint64_t S) const override;
  bool needsThunk(uint32_t Type, const InputFile &File,
                  const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  bool usesOnlyLowPageBits(uint32_t Type) const override;
};
} // anonymous namespace
206 
207 TargetInfo *createTarget() {
208   switch (Config->EMachine) {
209   case EM_386:
210     return new X86TargetInfo();
211   case EM_AARCH64:
212     return new AArch64TargetInfo();
213   case EM_AMDGPU:
214     return new AMDGPUTargetInfo();
215   case EM_ARM:
216     return new ARMTargetInfo();
217   case EM_MIPS:
218     switch (Config->EKind) {
219     case ELF32LEKind:
220       return new MipsTargetInfo<ELF32LE>();
221     case ELF32BEKind:
222       return new MipsTargetInfo<ELF32BE>();
223     case ELF64LEKind:
224       return new MipsTargetInfo<ELF64LE>();
225     case ELF64BEKind:
226       return new MipsTargetInfo<ELF64BE>();
227     default:
228       fatal("unsupported MIPS target");
229     }
230   case EM_PPC:
231     return new PPCTargetInfo();
232   case EM_PPC64:
233     return new PPC64TargetInfo();
234   case EM_X86_64:
235     return new X86_64TargetInfo();
236   }
237   fatal("unknown target machine");
238 }
239 
240 TargetInfo::~TargetInfo() {}
241 
// Default: the target stores no implicit addend in the instruction
// stream, so nothing is read from Buf and zero is reported.
uint64_t TargetInfo::getImplicitAddend(const uint8_t *Buf,
                                       uint32_t Type) const {
  return 0;
}
246 
247 uint64_t TargetInfo::getVAStart() const { return Config->Pic ? 0 : VAStart; }
248 
// Default: no relocation type consumes only the low page-offset bits.
bool TargetInfo::usesOnlyLowPageBits(uint32_t Type) const { return false; }
250 
// Default: no target-specific thunks are required for any relocation.
bool TargetInfo::needsThunk(uint32_t Type, const InputFile &File,
                            const SymbolBody &S) const {
  return false;
}
255 
// Default: no relocation type belongs to the TLS initial-exec model.
bool TargetInfo::isTlsInitialExecRel(uint32_t Type) const { return false; }
257 
// Default: no relocation type belongs to the TLS local-dynamic model.
bool TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const { return false; }
259 
// Default: no relocation type belongs to the TLS global-dynamic model.
bool TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
  return false;
}
263 
// Default: relaxation does not change the relocation expression kind.
RelExpr TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                                    RelExpr Expr) const {
  return Expr;
}
268 
// Reached only if a target reported a GOT relaxation opportunity
// without implementing the rewrite.
void TargetInfo::relaxGot(uint8_t *Loc, uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}
272 
// Reached only if a target claimed GD->LE relaxation without
// implementing the rewrite.
void TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}
277 
// Reached only if a target claimed GD->IE relaxation without
// implementing the rewrite.
void TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}
282 
// Reached only if a target claimed IE->LE relaxation without
// implementing the rewrite.
void TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}
287 
// Reached only if a target claimed LD->LE relaxation without
// implementing the rewrite.
void TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}
292 
293 X86TargetInfo::X86TargetInfo() {
294   CopyRel = R_386_COPY;
295   GotRel = R_386_GLOB_DAT;
296   PltRel = R_386_JUMP_SLOT;
297   IRelativeRel = R_386_IRELATIVE;
298   RelativeRel = R_386_RELATIVE;
299   TlsGotRel = R_386_TLS_TPOFF;
300   TlsModuleIndexRel = R_386_TLS_DTPMOD32;
301   TlsOffsetRel = R_386_TLS_DTPOFF32;
302   PltEntrySize = 16;
303   PltHeaderSize = 16;
304   TlsGdRelaxSkip = 2;
305 }
306 
307 RelExpr X86TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
308   switch (Type) {
309   default:
310     return R_ABS;
311   case R_386_TLS_GD:
312     return R_TLSGD;
313   case R_386_TLS_LDM:
314     return R_TLSLD;
315   case R_386_PLT32:
316     return R_PLT_PC;
317   case R_386_PC32:
318     return R_PC;
319   case R_386_GOTPC:
320     return R_GOTONLY_PC;
321   case R_386_TLS_IE:
322     return R_GOT;
323   case R_386_GOT32:
324   case R_386_TLS_GOTIE:
325     return R_GOT_FROM_END;
326   case R_386_GOTOFF:
327     return R_GOTREL;
328   case R_386_TLS_LE:
329     return R_TLS;
330   case R_386_TLS_LE_32:
331     return R_NEG_TLS;
332   }
333 }
334 
335 RelExpr X86TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
336                                        RelExpr Expr) const {
337   switch (Expr) {
338   default:
339     return Expr;
340   case R_RELAX_TLS_GD_TO_IE:
341     return R_RELAX_TLS_GD_TO_IE_END;
342   case R_RELAX_TLS_GD_TO_LE:
343     return R_RELAX_TLS_GD_TO_LE_NEG;
344   }
345 }
346 
// The first .got.plt word holds the address of the dynamic section.
void X86TargetInfo::writeGotPltHeader(uint8_t *Buf) const {
  write32le(Buf, Out<ELF32LE>::Dynamic->getVA());
}
350 
351 void X86TargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &S) const {
352   // Entries in .got.plt initially points back to the corresponding
353   // PLT entries with a fixed offset to skip the first instruction.
354   write32le(Buf, S.getPltVA<ELF32LE>() + 6);
355 }
356 
357 uint32_t X86TargetInfo::getDynRel(uint32_t Type) const {
358   if (Type == R_386_TLS_LE)
359     return R_386_TLS_TPOFF;
360   if (Type == R_386_TLS_LE_32)
361     return R_386_TLS_TPOFF32;
362   return Type;
363 }
364 
// R_386_TLS_GD is the only general-dynamic TLS relocation on i386.
bool X86TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
  return Type == R_386_TLS_GD;
}
368 
369 bool X86TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const {
370   return Type == R_386_TLS_LDO_32 || Type == R_386_TLS_LDM;
371 }
372 
373 bool X86TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
374   return Type == R_386_TLS_IE || Type == R_386_TLS_GOTIE;
375 }
376 
// Emit PLT0, the common prologue that enters the lazy resolver.
void X86TargetInfo::writePltHeader(uint8_t *Buf) const {
  // Executable files and shared object files have
  // separate procedure linkage tables.
  if (Config->Pic) {
    // PIC form: both GOT slots are addressed relative to %ebx, so no
    // absolute addresses need to be patched in.
    const uint8_t V[] = {
        0xff, 0xb3, 0x04, 0x00, 0x00, 0x00, // pushl 4(%ebx)
        0xff, 0xa3, 0x08, 0x00, 0x00, 0x00, // jmp   *8(%ebx)
        0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
    };
    memcpy(Buf, V, sizeof(V));
    return;
  }

  // Non-PIC form: patch the absolute addresses of GOT+4 and GOT+8 into
  // the 32-bit operand fields at offsets 2 and 8.
  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushl (GOT+4)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp   *(GOT+8)
      0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint32_t Got = Out<ELF32LE>::GotPlt->getVA();
  write32le(Buf + 2, Got + 4);
  write32le(Buf + 8, Got + 8);
}
400 
// Emit one 16-byte PLT entry: an indirect jump through the symbol's
// .got.plt slot, then (taken only on the first, unresolved call) a push
// of the relocation offset and a jump back to PLT0.
void X86TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                             uint64_t PltEntryAddr, int32_t Index,
                             unsigned RelOff) const {
  const uint8_t Inst[] = {
      0xff, 0x00, 0x00, 0x00, 0x00, 0x00, // jmp *foo_in_GOT|*foo@GOT(%ebx)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushl $reloc_offset
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmp .PLT0@PC
  };
  memcpy(Buf, Inst, sizeof(Inst));

  // jmp *foo@GOT(%ebx) or jmp *foo_in_GOT
  // The ModR/M byte selects a %ebx-relative operand for PIC output and
  // an absolute memory operand otherwise.
  Buf[1] = Config->Pic ? 0xa3 : 0x25;
  uint32_t Got = Out<ELF32LE>::GotPlt->getVA();
  write32le(Buf + 2, Config->Shared ? GotEntryAddr - Got : GotEntryAddr);
  write32le(Buf + 7, RelOff);
  // PC-relative displacement from the end of this entry back to PLT0.
  write32le(Buf + 12, -Index * PltEntrySize - PltHeaderSize - 16);
}
418 
419 uint64_t X86TargetInfo::getImplicitAddend(const uint8_t *Buf,
420                                           uint32_t Type) const {
421   switch (Type) {
422   default:
423     return 0;
424   case R_386_32:
425   case R_386_GOT32:
426   case R_386_GOTOFF:
427   case R_386_GOTPC:
428   case R_386_PC32:
429   case R_386_PLT32:
430     return read32le(Buf);
431   }
432 }
433 
// Every i386 relocation handled here patches a 32-bit little-endian
// word; range-check the value as a signed 32-bit quantity first.
void X86TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  checkInt<32>(Val, Type);
  write32le(Loc, Val);
}
439 
// GD->LE relaxation: the TP offset is known at link time, so replace the
// __tls_get_addr call sequence with direct %gs-relative arithmetic.
void X86TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Convert
  //   leal x@tlsgd(, %ebx, 1),
  //   call __tls_get_addr@plt
  // to
  //   movl %gs:0,%eax
  //   subl $x@ntpoff,%eax
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x81, 0xe8, 0x00, 0x00, 0x00, 0x00  // subl 0(%ebx), %eax
  };
  // Loc addresses the leal's 32-bit operand; the 12-byte replacement
  // starts 3 bytes earlier, and the subl immediate lands at Loc + 5.
  memcpy(Loc - 3, Inst, sizeof(Inst));
  relocateOne(Loc + 5, R_386_32, Val);
}
455 
// GD->IE relaxation: the variable lives in a different module but its
// GOT entry is filled at load time, so fetch the TP offset from the GOT
// instead of calling __tls_get_addr.
void X86TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Convert
  //   leal x@tlsgd(, %ebx, 1),
  //   call __tls_get_addr@plt
  // to
  //   movl %gs:0, %eax
  //   addl x@gotntpoff(%ebx), %eax
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x03, 0x83, 0x00, 0x00, 0x00, 0x00  // addl 0(%ebx), %eax
  };
  // Same layout as relaxTlsGdToLe: replacement begins 3 bytes before Loc
  // and its addl displacement lands at Loc + 5.
  memcpy(Loc - 3, Inst, sizeof(Inst));
  relocateOne(Loc + 5, R_386_32, Val);
}
471 
472 // In some conditions, relocations can be optimized to avoid using GOT.
473 // This function does that for Initial Exec to Local Exec case.
void X86TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Ulrich's document section 6.2 says that @gotntpoff can
  // be used with MOVL or ADDL instructions.
  // @indntpoff is similar to @gotntpoff, but for use in
  // position dependent code.
  // Extract the destination register from MODRM.reg of the byte
  // immediately preceding the relocated operand.
  uint8_t Reg = (Loc[-1] >> 3) & 7;

  if (Type == R_386_TLS_IE) {
    if (Loc[-1] == 0xa1) {
      // "movl foo@indntpoff,%eax" -> "movl $foo,%eax"
      // This case is different from the generic case below because
      // this is a 5 byte instruction while below is 6 bytes.
      Loc[-1] = 0xb8;
    } else if (Loc[-2] == 0x8b) {
      // "movl foo@indntpoff,%reg" -> "movl $foo,%reg"
      Loc[-2] = 0xc7;
      Loc[-1] = 0xc0 | Reg;
    } else {
      // "addl foo@indntpoff,%reg" -> "addl $foo,%reg"
      Loc[-2] = 0x81;
      Loc[-1] = 0xc0 | Reg;
    }
  } else {
    assert(Type == R_386_TLS_GOTIE);
    if (Loc[-2] == 0x8b) {
      // "movl foo@gottpoff(%rip),%reg" -> "movl $foo,%reg"
      Loc[-2] = 0xc7;
      Loc[-1] = 0xc0 | Reg;
    } else {
      // "addl foo@gotntpoff(%rip),%reg" -> "leal foo(%reg),%reg"
      Loc[-2] = 0x8d;
      Loc[-1] = 0x80 | (Reg << 3) | Reg;
    }
  }
  // Patch in the link-time-known TP-relative offset.
  relocateOne(Loc, R_386_TLS_LE, Val);
}
511 
// LD->LE relaxation. R_386_TLS_LDO_32 operands inside the relaxed
// sequence become plain TP-relative offsets; the R_386_TLS_LDM site
// itself is rewritten to skip the __tls_get_addr call entirely.
void X86TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  if (Type == R_386_TLS_LDO_32) {
    relocateOne(Loc, R_386_TLS_LE, Val);
    return;
  }

  // Convert
  //   leal foo(%reg),%eax
  //   call ___tls_get_addr
  // to
  //   movl %gs:0,%eax
  //   nop
  //   leal 0(%esi,1),%esi
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0,%eax
      0x90,                               // nop
      0x8d, 0x74, 0x26, 0x00              // leal 0(%esi,1),%esi
  };
  // The replacement starts 2 bytes before the relocated operand.
  memcpy(Loc - 2, Inst, sizeof(Inst));
}
533 
534 X86_64TargetInfo::X86_64TargetInfo() {
535   CopyRel = R_X86_64_COPY;
536   GotRel = R_X86_64_GLOB_DAT;
537   PltRel = R_X86_64_JUMP_SLOT;
538   RelativeRel = R_X86_64_RELATIVE;
539   IRelativeRel = R_X86_64_IRELATIVE;
540   TlsGotRel = R_X86_64_TPOFF64;
541   TlsModuleIndexRel = R_X86_64_DTPMOD64;
542   TlsOffsetRel = R_X86_64_DTPOFF64;
543   PltEntrySize = 16;
544   PltHeaderSize = 16;
545   TlsGdRelaxSkip = 2;
546 }
547 
548 RelExpr X86_64TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
549   switch (Type) {
550   default:
551     return R_ABS;
552   case R_X86_64_TPOFF32:
553     return R_TLS;
554   case R_X86_64_TLSLD:
555     return R_TLSLD_PC;
556   case R_X86_64_TLSGD:
557     return R_TLSGD_PC;
558   case R_X86_64_SIZE32:
559   case R_X86_64_SIZE64:
560     return R_SIZE;
561   case R_X86_64_PLT32:
562     return R_PLT_PC;
563   case R_X86_64_PC32:
564   case R_X86_64_PC64:
565     return R_PC;
566   case R_X86_64_GOT32:
567     return R_GOT_FROM_END;
568   case R_X86_64_GOTPCREL:
569   case R_X86_64_GOTPCRELX:
570   case R_X86_64_REX_GOTPCRELX:
571   case R_X86_64_GOTTPOFF:
572     return R_GOT_PC;
573   }
574 }
575 
void X86_64TargetInfo::writeGotPltHeader(uint8_t *Buf) const {
  // The first entry holds the value of _DYNAMIC. It is not clear why that is
  // required, but it is documented in the psabi and the glibc dynamic linker
  // seems to use it (note that this is relevant for linking ld.so, not any
  // other program).
  write64le(Buf, Out<ELF64LE>::Dynamic->getVA());
}
583 
584 void X86_64TargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &S) const {
585   // See comments in X86TargetInfo::writeGotPlt.
586   write32le(Buf, S.getPltVA<ELF64LE>() + 6);
587 }
588 
// Emit PLT0: push the second .got.plt slot and jump through the third to
// enter the lazy resolver. Both operands are RIP-relative, measured from
// the end of each 6-byte instruction.
void X86_64TargetInfo::writePltHeader(uint8_t *Buf) const {
  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushq GOT+8(%rip)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *GOT+16(%rip)
      0x0f, 0x1f, 0x40, 0x00              // nopl 0x0(rax)
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint64_t Got = Out<ELF64LE>::GotPlt->getVA();
  uint64_t Plt = Out<ELF64LE>::Plt->getVA();
  write32le(Buf + 2, Got - Plt + 2); // GOT+8: (Got + 8) - (Plt + 6)
  write32le(Buf + 8, Got - Plt + 4); // GOT+16: (Got + 16) - (Plt + 12)
}
601 
// Emit one 16-byte PLT entry: a jump through the symbol's .got.plt slot;
// on the first call the slot points back just past the jump, so the push
// of the relocation index and the branch to PLT0 perform lazy binding.
void X86_64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                uint64_t PltEntryAddr, int32_t Index,
                                unsigned RelOff) const {
  const uint8_t Inst[] = {
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmpq *got(%rip)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushq <relocation index>
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmpq plt[0]
  };
  memcpy(Buf, Inst, sizeof(Inst));

  // RIP-relative displacement to the GOT slot (RIP = entry start + 6).
  write32le(Buf + 2, GotEntryAddr - PltEntryAddr - 6);
  write32le(Buf + 7, Index);
  // PC-relative displacement from the end of this entry back to PLT0.
  write32le(Buf + 12, -Index * PltEntrySize - PltHeaderSize - 16);
}
616 
617 uint32_t X86_64TargetInfo::getDynRel(uint32_t Type) const {
618   if (Type == R_X86_64_PC32 || Type == R_X86_64_32)
619     errorDynRel(Type);
620   return Type;
621 }
622 
// R_X86_64_GOTTPOFF is the only initial-exec TLS relocation on x86-64.
bool X86_64TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
  return Type == R_X86_64_GOTTPOFF;
}
626 
// R_X86_64_TLSGD is the only general-dynamic TLS relocation on x86-64.
bool X86_64TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
  return Type == R_X86_64_TLSGD;
}
630 
631 bool X86_64TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const {
632   return Type == R_X86_64_DTPOFF32 || Type == R_X86_64_DTPOFF64 ||
633          Type == R_X86_64_TLSLD;
634 }
635 
// GD->LE relaxation: the TP offset is known at link time, so replace the
// __tls_get_addr call sequence with a direct %fs-relative computation.
void X86_64TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                      uint64_t Val) const {
  // Convert
  //   .byte 0x66
  //   leaq x@tlsgd(%rip), %rdi
  //   .word 0x6666
  //   rex64
  //   call __tls_get_addr@plt
  // to
  //   mov %fs:0x0,%rax
  //   lea x@tpoff,%rax
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
      0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00              // lea x@tpoff,%rax
  };
  // Loc addresses the leaq displacement; the 16-byte replacement begins
  // 4 bytes earlier, with the lea immediate landing at Loc + 8.
  memcpy(Loc - 4, Inst, sizeof(Inst));
  // The original code used a pc relative relocation and so we have to
  // compensate for the -4 it had in the addend.
  relocateOne(Loc + 8, R_X86_64_TPOFF32, Val + 4);
}
656 
// GD->IE relaxation: the GOT entry is filled with the TP offset at load
// time, so add it from the GOT instead of calling __tls_get_addr.
void X86_64TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
                                      uint64_t Val) const {
  // Convert
  //   .byte 0x66
  //   leaq x@tlsgd(%rip), %rdi
  //   .word 0x6666
  //   rex64
  //   call __tls_get_addr@plt
  // to
  //   mov %fs:0x0,%rax
  //   addq x@tpoff,%rax
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
      0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00              // addq x@tpoff,%rax
  };
  memcpy(Loc - 4, Inst, sizeof(Inst));
  // Both code sequences are PC relatives, but since we are moving the constant
  // forward by 8 bytes we have to subtract the value by 8.
  relocateOne(Loc + 8, R_X86_64_PC32, Val - 8);
}
677 
678 // In some conditions, R_X86_64_GOTTPOFF relocation can be optimized to
679 // R_X86_64_TPOFF32 so that it does not use GOT.
void X86_64TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                      uint64_t Val) const {
  // Inst points at the REX prefix of the mov/add being rewritten; Loc is
  // the 32-bit RIP-relative displacement it originally carried.
  uint8_t *Inst = Loc - 3;
  // The Mod field is 0 for the RIP-relative forms matched below, so the
  // shift alone isolates MODRM.reg.
  uint8_t Reg = Loc[-1] >> 3;
  uint8_t *RegSlot = Loc - 1;

  // Note that ADD with RSP or R12 is converted to ADD instead of LEA
  // because LEA with these registers needs 4 bytes to encode and thus
  // wouldn't fit the space.

  if (memcmp(Inst, "\x48\x03\x25", 3) == 0) {
    // "addq foo@gottpoff(%rip),%rsp" -> "addq $foo,%rsp"
    memcpy(Inst, "\x48\x81\xc4", 3);
  } else if (memcmp(Inst, "\x4c\x03\x25", 3) == 0) {
    // "addq foo@gottpoff(%rip),%r12" -> "addq $foo,%r12"
    memcpy(Inst, "\x49\x81\xc4", 3);
  } else if (memcmp(Inst, "\x4c\x03", 2) == 0) {
    // "addq foo@gottpoff(%rip),%r[8-15]" -> "leaq foo(%r[8-15]),%r[8-15]"
    memcpy(Inst, "\x4d\x8d", 2);
    *RegSlot = 0x80 | (Reg << 3) | Reg;
  } else if (memcmp(Inst, "\x48\x03", 2) == 0) {
    // "addq foo@gottpoff(%rip),%reg -> "leaq foo(%reg),%reg"
    memcpy(Inst, "\x48\x8d", 2);
    *RegSlot = 0x80 | (Reg << 3) | Reg;
  } else if (memcmp(Inst, "\x4c\x8b", 2) == 0) {
    // "movq foo@gottpoff(%rip),%r[8-15]" -> "movq $foo,%r[8-15]"
    memcpy(Inst, "\x49\xc7", 2);
    *RegSlot = 0xc0 | Reg;
  } else if (memcmp(Inst, "\x48\x8b", 2) == 0) {
    // "movq foo@gottpoff(%rip),%reg" -> "movq $foo,%reg"
    memcpy(Inst, "\x48\xc7", 2);
    *RegSlot = 0xc0 | Reg;
  } else {
    fatal("R_X86_64_GOTTPOFF must be used in MOVQ or ADDQ instructions only");
  }

  // The original code used a PC relative relocation.
  // Need to compensate for the -4 it had in the addend.
  relocateOne(Loc, R_X86_64_TPOFF32, Val + 4);
}
720 
// LD->LE relaxation. DTPOFF operands within the relaxed sequence become
// plain TP-relative values; only the R_X86_64_TLSLD site itself rewrites
// instructions.
void X86_64TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                      uint64_t Val) const {
  // Convert
  //   leaq bar@tlsld(%rip), %rdi
  //   callq __tls_get_addr@PLT
  //   leaq bar@dtpoff(%rax), %rcx
  // to
  //   .word 0x6666
  //   .byte 0x66
  //   mov %fs:0,%rax
  //   leaq bar@tpoff(%rax), %rcx
  if (Type == R_X86_64_DTPOFF64) {
    write64le(Loc, Val);
    return;
  }
  if (Type == R_X86_64_DTPOFF32) {
    relocateOne(Loc, R_X86_64_TPOFF32, Val);
    return;
  }

  const uint8_t Inst[] = {
      0x66, 0x66,                                          // .word 0x6666
      0x66,                                                // .byte 0x66
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
  };
  // The 12-byte replacement starts 3 bytes before the leaq displacement
  // and exactly covers the leaq (7 bytes) plus the callq (5 bytes).
  memcpy(Loc - 3, Inst, sizeof(Inst));
}
748 
// Apply a single x86-64 relocation: range-check Val for the field's
// signedness and width, then patch the bytes at Loc.
void X86_64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  switch (Type) {
  case R_X86_64_32:
    // 32-bit zero-extended absolute: must fit in unsigned 32 bits.
    checkUInt<32>(Val, Type);
    write32le(Loc, Val);
    break;
  // 32-bit fields checked as signed values (sign-extended absolute,
  // PC-relative, GOT- and TLS-relative forms).
  case R_X86_64_32S:
  case R_X86_64_TPOFF32:
  case R_X86_64_GOT32:
  case R_X86_64_GOTPCREL:
  case R_X86_64_GOTPCRELX:
  case R_X86_64_REX_GOTPCRELX:
  case R_X86_64_PC32:
  case R_X86_64_GOTTPOFF:
  case R_X86_64_PLT32:
  case R_X86_64_TLSGD:
  case R_X86_64_TLSLD:
  case R_X86_64_DTPOFF32:
  case R_X86_64_SIZE32:
    checkInt<32>(Val, Type);
    write32le(Loc, Val);
    break;
  // Full 64-bit fields need no range check.
  case R_X86_64_64:
  case R_X86_64_DTPOFF64:
  case R_X86_64_SIZE64:
  case R_X86_64_PC64:
    write64le(Loc, Val);
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}
782 
// Decide whether a GOTPCRELX-style GOT load can be relaxed, based on the
// instruction bytes that precede the relocated displacement.
RelExpr X86_64TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                                          RelExpr RelExpr) const {
  if (Type != R_X86_64_GOTPCRELX && Type != R_X86_64_REX_GOTPCRELX)
    return RelExpr;
  // Data points at the 32-bit displacement, so the two preceding bytes
  // are the primary opcode and the ModR/M byte.
  const uint8_t Op = Data[-2];
  const uint8_t ModRm = Data[-1];
  // FIXME: When PIC is disabled and foo is defined locally in the
  // lower 32 bit address space, memory operand in mov can be converted into
  // immediate operand. Otherwise, mov must be changed to lea. We support only
  // latter relaxation at this moment.
  if (Op == 0x8b) // mov r64, r/m64
    return R_RELAX_GOT_PC;
  // Relax call and jmp.
  if (Op == 0xff && (ModRm == 0x15 || ModRm == 0x25))
    return R_RELAX_GOT_PC;

  // Relaxation of test, adc, add, and, cmp, or, sbb, sub, xor.
  // If PIC then no relaxation is available.
  // We also don't relax test/binop instructions without REX byte,
  // they are 32bit operations and not common to have.
  assert(Type == R_X86_64_REX_GOTPCRELX);
  return Config->Pic ? RelExpr : R_RELAX_GOT_PC_NOPIC;
}
806 
807 // A subset of relaxations can only be applied for no-PIC. This method
808 // handles such relaxations. Instructions encoding information was taken from:
809 // "Intel 64 and IA-32 Architectures Software Developer's Manual V2"
810 // (http://www.intel.com/content/dam/www/public/us/en/documents/manuals/
811 //    64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf)
812 void X86_64TargetInfo::relaxGotNoPic(uint8_t *Loc, uint64_t Val, uint8_t Op,
813                                      uint8_t ModRm) const {
814   const uint8_t Rex = Loc[-3];
815   // Convert "test %reg, foo@GOTPCREL(%rip)" to "test $foo, %reg".
816   if (Op == 0x85) {
817     // See "TEST-Logical Compare" (4-428 Vol. 2B),
818     // TEST r/m64, r64 uses "full" ModR / M byte (no opcode extension).
819 
820     // ModR/M byte has form XX YYY ZZZ, where
821     // YYY is MODRM.reg(register 2), ZZZ is MODRM.rm(register 1).
822     // XX has different meanings:
823     // 00: The operand's memory address is in reg1.
824     // 01: The operand's memory address is reg1 + a byte-sized displacement.
825     // 10: The operand's memory address is reg1 + a word-sized displacement.
826     // 11: The operand is reg1 itself.
827     // If an instruction requires only one operand, the unused reg2 field
828     // holds extra opcode bits rather than a register code
829     // 0xC0 == 11 000 000 binary.
830     // 0x38 == 00 111 000 binary.
831     // We transfer reg2 to reg1 here as operand.
832     // See "2.1.3 ModR/M and SIB Bytes" (Vol. 2A 2-3).
833     Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3; // ModR/M byte.
834 
835     // Change opcode from TEST r/m64, r64 to TEST r/m64, imm32
836     // See "TEST-Logical Compare" (4-428 Vol. 2B).
837     Loc[-2] = 0xf7;
838 
839     // Move R bit to the B bit in REX byte.
840     // REX byte is encoded as 0100WRXB, where
841     // 0100 is 4bit fixed pattern.
842     // REX.W When 1, a 64-bit operand size is used. Otherwise, when 0, the
843     //   default operand size is used (which is 32-bit for most but not all
844     //   instructions).
845     // REX.R This 1-bit value is an extension to the MODRM.reg field.
846     // REX.X This 1-bit value is an extension to the SIB.index field.
847     // REX.B This 1-bit value is an extension to the MODRM.rm field or the
848     // SIB.base field.
849     // See "2.2.1.2 More on REX Prefix Fields " (2-8 Vol. 2A).
850     Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
851     relocateOne(Loc, R_X86_64_PC32, Val);
852     return;
853   }
854 
855   // If we are here then we need to relax the adc, add, and, cmp, or, sbb, sub
856   // or xor operations.
857 
858   // Convert "binop foo@GOTPCREL(%rip), %reg" to "binop $foo, %reg".
859   // Logic is close to one for test instruction above, but we also
860   // write opcode extension here, see below for details.
861   Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3 | (Op & 0x3c); // ModR/M byte.
862 
863   // Primary opcode is 0x81, opcode extension is one of:
864   // 000b = ADD, 001b is OR, 010b is ADC, 011b is SBB,
865   // 100b is AND, 101b is SUB, 110b is XOR, 111b is CMP.
866   // This value was wrote to MODRM.reg in a line above.
867   // See "3.2 INSTRUCTIONS (A-M)" (Vol. 2A 3-15),
868   // "INSTRUCTION SET REFERENCE, N-Z" (Vol. 2B 4-1) for
869   // descriptions about each operation.
870   Loc[-2] = 0x81;
871   Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
872   relocateOne(Loc, R_X86_64_PC32, Val);
873 }
874 
// Rewrites one GOT-indirect x86-64 instruction into its direct equivalent.
// Loc points at the 4-byte displacement field; the opcode and ModR/M bytes
// sit immediately before it at Loc[-2] and Loc[-1]. Val is the value to be
// written by the replacement relocation.
void X86_64TargetInfo::relaxGot(uint8_t *Loc, uint64_t Val) const {
  const uint8_t Op = Loc[-2];
  const uint8_t ModRm = Loc[-1];

  // Convert "mov foo@GOTPCREL(%rip),%reg" to "lea foo(%rip),%reg".
  if (Op == 0x8b) {
    Loc[-2] = 0x8d;
    relocateOne(Loc, R_X86_64_PC32, Val);
    return;
  }

  if (Op != 0xff) {
    // Test/binop case (handled by relaxGotNoPic).
    // We are relaxing a rip relative to an absolute, so compensate
    // for the old -4 addend.
    assert(!Config->Pic);
    relaxGotNoPic(Loc, Val + 4, Op, ModRm);
    return;
  }

  // Convert call/jmp instructions.
  if (ModRm == 0x15) {
    // ABI says we can convert "call *foo@GOTPCREL(%rip)" to "nop; call foo".
    // Instead we convert to "addr32 call foo" where addr32 is an instruction
    // prefix. That makes result expression to be a single instruction.
    Loc[-2] = 0x67; // addr32 prefix
    Loc[-1] = 0xe8; // call
    relocateOne(Loc, R_X86_64_PC32, Val);
    return;
  }

  // Convert "jmp *foo@GOTPCREL(%rip)" to "jmp foo; nop".
  // jmp doesn't return, so it is fine to use nop here, it is just a stub.
  // The original "jmp *foo(%rip)" is 6 bytes (ff 25 disp32) while "jmp foo"
  // is 5 (e9 rel32), so the rel32 field starts one byte earlier (Loc - 1),
  // the trailing byte becomes the nop, and the relocated value grows by one
  // to account for the field having moved one byte towards the instruction
  // start.
  assert(ModRm == 0x25);
  Loc[-2] = 0xe9; // jmp
  Loc[3] = 0x90;  // nop
  relocateOne(Loc - 1, R_X86_64_PC32, Val + 1);
}
912 
// Relocation masks following the #lo(value), #hi(value), #ha(value),
// #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
// document. Each helper extracts one 16-bit slice of a 64-bit value; the
// "a" (adjusted) variants pre-add 0x8000 so the slice pairs correctly with
// a sign-extended low half.
static uint16_t applyPPCLo(uint64_t Val) { return Val & 0xffff; }
static uint16_t applyPPCHi(uint64_t Val) { return (Val >> 16) & 0xffff; }
static uint16_t applyPPCHa(uint64_t Val) { return ((Val + 0x8000) >> 16) & 0xffff; }
static uint16_t applyPPCHigher(uint64_t Val) { return (Val >> 32) & 0xffff; }
static uint16_t applyPPCHighera(uint64_t Val) { return ((Val + 0x8000) >> 32) & 0xffff; }
static uint16_t applyPPCHighest(uint64_t Val) { return (Val >> 48) & 0xffff; }
static uint16_t applyPPCHighesta(uint64_t Val) { return ((Val + 0x8000) >> 48) & 0xffff; }
924 
925 PPCTargetInfo::PPCTargetInfo() {}
926 
927 void PPCTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
928                                 uint64_t Val) const {
929   switch (Type) {
930   case R_PPC_ADDR16_HA:
931     write16be(Loc, applyPPCHa(Val));
932     break;
933   case R_PPC_ADDR16_LO:
934     write16be(Loc, applyPPCLo(Val));
935     break;
936   default:
937     fatal("unrecognized reloc " + Twine(Type));
938   }
939 }
940 
// Every supported PPC32 relocation is treated as an absolute value.
RelExpr PPCTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
  return R_ABS;
}
944 
// Dynamic relocation types, PLT geometry and layout constants for PPC64.
PPC64TargetInfo::PPC64TargetInfo() {
  PltRel = GotRel = R_PPC64_GLOB_DAT;
  RelativeRel = R_PPC64_RELATIVE;
  // Eight 4-byte instructions per PLT entry (see writePlt); no header.
  PltEntrySize = 32;
  PltHeaderSize = 0;

  // We need 64K pages (at least under glibc/Linux, the loader won't
  // set different permissions on a finer granularity than that).
  PageSize = 65536;

  // The PPC64 ELF ABI v1 spec, says:
  //
  //   It is normally desirable to put segments with different characteristics
  //   in separate 256 Mbyte portions of the address space, to give the
  //   operating system full paging flexibility in the 64-bit address space.
  //
  // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
  // use 0x10000000 as the starting address.
  VAStart = 0x10000000;
}
965 
966 static uint64_t PPC64TocOffset = 0x8000;
967 
968 uint64_t getPPC64TocBase() {
969   // The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The
970   // TOC starts where the first of these sections starts. We always create a
971   // .got when we see a relocation that uses it, so for us the start is always
972   // the .got.
973   uint64_t TocVA = Out<ELF64BE>::Got->getVA();
974 
975   // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
976   // thus permitting a full 64 Kbytes segment. Note that the glibc startup
977   // code (crt1.o) assumes that you can get from the TOC base to the
978   // start of the .toc section with only a single (signed) 16-bit relocation.
979   return TocVA + PPC64TocOffset;
980 }
981 
982 RelExpr PPC64TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
983   switch (Type) {
984   default:
985     return R_ABS;
986   case R_PPC64_TOC16:
987   case R_PPC64_TOC16_DS:
988   case R_PPC64_TOC16_HA:
989   case R_PPC64_TOC16_HI:
990   case R_PPC64_TOC16_LO:
991   case R_PPC64_TOC16_LO_DS:
992     return R_GOTREL;
993   case R_PPC64_TOC:
994     return R_PPC_TOC;
995   case R_PPC64_REL24:
996     return R_PPC_PLT_OPD;
997   }
998 }
999 
1000 void PPC64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
1001                                uint64_t PltEntryAddr, int32_t Index,
1002                                unsigned RelOff) const {
1003   uint64_t Off = GotEntryAddr - getPPC64TocBase();
1004 
1005   // FIXME: What we should do, in theory, is get the offset of the function
1006   // descriptor in the .opd section, and use that as the offset from %r2 (the
1007   // TOC-base pointer). Instead, we have the GOT-entry offset, and that will
1008   // be a pointer to the function descriptor in the .opd section. Using
1009   // this scheme is simpler, but requires an extra indirection per PLT dispatch.
1010 
1011   write32be(Buf,      0xf8410028);                   // std %r2, 40(%r1)
1012   write32be(Buf + 4,  0x3d620000 | applyPPCHa(Off)); // addis %r11, %r2, X@ha
1013   write32be(Buf + 8,  0xe98b0000 | applyPPCLo(Off)); // ld %r12, X@l(%r11)
1014   write32be(Buf + 12, 0xe96c0000);                   // ld %r11,0(%r12)
1015   write32be(Buf + 16, 0x7d6903a6);                   // mtctr %r11
1016   write32be(Buf + 20, 0xe84c0008);                   // ld %r2,8(%r12)
1017   write32be(Buf + 24, 0xe96c0010);                   // ld %r11,16(%r12)
1018   write32be(Buf + 28, 0x4e800420);                   // bctr
1019 }
1020 
1021 static std::pair<uint32_t, uint64_t> toAddr16Rel(uint32_t Type, uint64_t Val) {
1022   uint64_t V = Val - PPC64TocOffset;
1023   switch (Type) {
1024   case R_PPC64_TOC16: return {R_PPC64_ADDR16, V};
1025   case R_PPC64_TOC16_DS: return {R_PPC64_ADDR16_DS, V};
1026   case R_PPC64_TOC16_HA: return {R_PPC64_ADDR16_HA, V};
1027   case R_PPC64_TOC16_HI: return {R_PPC64_ADDR16_HI, V};
1028   case R_PPC64_TOC16_LO: return {R_PPC64_ADDR16_LO, V};
1029   case R_PPC64_TOC16_LO_DS: return {R_PPC64_ADDR16_LO_DS, V};
1030   default: return {Type, Val};
1031   }
1032 }
1033 
// Applies a PPC64 relocation at Loc with the already-computed value Val.
// TOC-relative types are first translated to their ADDR16 equivalents so
// both families share one switch.
void PPC64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                  uint64_t Val) const {
  // For a TOC-relative relocation, proceed in terms of the corresponding
  // ADDR16 relocation type.
  std::tie(Type, Val) = toAddr16Rel(Type, Val);

  switch (Type) {
  case R_PPC64_ADDR14: {
    checkAlignment<4>(Val, Type);
    // Preserve the AA/LK bits in the branch instruction
    uint8_t AALK = Loc[3];
    write16be(Loc + 2, (AALK & 3) | (Val & 0xfffc));
    break;
  }
  case R_PPC64_ADDR16:
    checkInt<16>(Val, Type);
    write16be(Loc, Val);
    break;
  case R_PPC64_ADDR16_DS:
    // DS-form: the low two bits of the halfword hold opcode bits and must be
    // preserved.
    checkInt<16>(Val, Type);
    write16be(Loc, (read16be(Loc) & 3) | (Val & ~3));
    break;
  case R_PPC64_ADDR16_HA:
  case R_PPC64_REL16_HA:
    write16be(Loc, applyPPCHa(Val));
    break;
  case R_PPC64_ADDR16_HI:
  case R_PPC64_REL16_HI:
    write16be(Loc, applyPPCHi(Val));
    break;
  case R_PPC64_ADDR16_HIGHER:
    write16be(Loc, applyPPCHigher(Val));
    break;
  case R_PPC64_ADDR16_HIGHERA:
    write16be(Loc, applyPPCHighera(Val));
    break;
  case R_PPC64_ADDR16_HIGHEST:
    write16be(Loc, applyPPCHighest(Val));
    break;
  case R_PPC64_ADDR16_HIGHESTA:
    write16be(Loc, applyPPCHighesta(Val));
    break;
  case R_PPC64_ADDR16_LO:
    write16be(Loc, applyPPCLo(Val));
    break;
  case R_PPC64_ADDR16_LO_DS:
  case R_PPC64_REL16_LO:
    // DS-form again: keep the two low opcode bits.
    write16be(Loc, (read16be(Loc) & 3) | (applyPPCLo(Val) & ~3));
    break;
  case R_PPC64_ADDR32:
  case R_PPC64_REL32:
    checkInt<32>(Val, Type);
    write32be(Loc, Val);
    break;
  case R_PPC64_ADDR64:
  case R_PPC64_REL64:
  case R_PPC64_TOC:
    write64be(Loc, Val);
    break;
  case R_PPC64_REL24: {
    // Patch the 26-bit branch displacement field, preserving the opcode and
    // the AA/LK bits outside the mask.
    uint32_t Mask = 0x03FFFFFC;
    checkInt<24>(Val, Type);
    write32be(Loc, (read32be(Loc) & ~Mask) | (Val & Mask));
    break;
  }
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}
1103 
// Dynamic relocation types and PLT geometry for AArch64.
AArch64TargetInfo::AArch64TargetInfo() {
  CopyRel = R_AARCH64_COPY;
  RelativeRel = R_AARCH64_RELATIVE;
  IRelativeRel = R_AARCH64_IRELATIVE;
  GotRel = R_AARCH64_GLOB_DAT;
  PltRel = R_AARCH64_JUMP_SLOT;
  TlsDescRel = R_AARCH64_TLSDESC;
  TlsGotRel = R_AARCH64_TLS_TPREL64;
  // Four instructions per entry, eight for the header (see writePlt and
  // writePltHeader below).
  PltEntrySize = 16;
  PltHeaderSize = 32;

  // It doesn't seem to be documented anywhere, but tls on aarch64 uses variant
  // 1 of the tls structures and the tcb size is 16.
  TcbSize = 16;
}
1119 
1120 RelExpr AArch64TargetInfo::getRelExpr(uint32_t Type,
1121                                       const SymbolBody &S) const {
1122   switch (Type) {
1123   default:
1124     return R_ABS;
1125   case R_AARCH64_TLSDESC_ADR_PAGE21:
1126     return R_TLSDESC_PAGE;
1127   case R_AARCH64_TLSDESC_LD64_LO12_NC:
1128   case R_AARCH64_TLSDESC_ADD_LO12_NC:
1129     return R_TLSDESC;
1130   case R_AARCH64_TLSDESC_CALL:
1131     return R_HINT;
1132   case R_AARCH64_TLSLE_ADD_TPREL_HI12:
1133   case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
1134     return R_TLS;
1135   case R_AARCH64_CALL26:
1136   case R_AARCH64_CONDBR19:
1137   case R_AARCH64_JUMP26:
1138   case R_AARCH64_TSTBR14:
1139     return R_PLT_PC;
1140   case R_AARCH64_PREL16:
1141   case R_AARCH64_PREL32:
1142   case R_AARCH64_PREL64:
1143   case R_AARCH64_ADR_PREL_LO21:
1144     return R_PC;
1145   case R_AARCH64_ADR_PREL_PG_HI21:
1146     return R_PAGE_PC;
1147   case R_AARCH64_LD64_GOT_LO12_NC:
1148   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
1149     return R_GOT;
1150   case R_AARCH64_ADR_GOT_PAGE:
1151   case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
1152     return R_GOT_PAGE_PC;
1153   }
1154 }
1155 
1156 RelExpr AArch64TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
1157                                            RelExpr Expr) const {
1158   if (Expr == R_RELAX_TLS_GD_TO_IE) {
1159     if (Type == R_AARCH64_TLSDESC_ADR_PAGE21)
1160       return R_RELAX_TLS_GD_TO_IE_PAGE_PC;
1161     return R_RELAX_TLS_GD_TO_IE_ABS;
1162   }
1163   return Expr;
1164 }
1165 
1166 bool AArch64TargetInfo::usesOnlyLowPageBits(uint32_t Type) const {
1167   switch (Type) {
1168   default:
1169     return false;
1170   case R_AARCH64_ADD_ABS_LO12_NC:
1171   case R_AARCH64_LD64_GOT_LO12_NC:
1172   case R_AARCH64_LDST128_ABS_LO12_NC:
1173   case R_AARCH64_LDST16_ABS_LO12_NC:
1174   case R_AARCH64_LDST32_ABS_LO12_NC:
1175   case R_AARCH64_LDST64_ABS_LO12_NC:
1176   case R_AARCH64_LDST8_ABS_LO12_NC:
1177   case R_AARCH64_TLSDESC_ADD_LO12_NC:
1178   case R_AARCH64_TLSDESC_LD64_LO12_NC:
1179   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
1180     return true;
1181   }
1182 }
1183 
1184 bool AArch64TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
1185   return Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
1186          Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
1187 }
1188 
1189 uint32_t AArch64TargetInfo::getDynRel(uint32_t Type) const {
1190   if (Type == R_AARCH64_ABS32 || Type == R_AARCH64_ABS64)
1191     return Type;
1192   // Keep it going with a dummy value so that we can find more reloc errors.
1193   errorDynRel(Type);
1194   return R_AARCH64_ABS32;
1195 }
1196 
// Initial .got.plt entries point at the start of the PLT.
void AArch64TargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
  write64le(Buf, Out<ELF64LE>::Plt->getVA());
}
1200 
// Rounds an address down to its 4 KiB page by masking off the low 12 bits.
static uint64_t getAArch64Page(uint64_t Expr) {
  return Expr & ~uint64_t(0xFFF);
}
1204 
// Writes the 32-byte PLT header. It saves x16/x30, then loads and branches to
// the address stored at .got.plt + 16; the adrp/ldr/add immediates are
// patched below via relocateOne.
void AArch64TargetInfo::writePltHeader(uint8_t *Buf) const {
  const uint8_t PltData[] = {
      0xf0, 0x7b, 0xbf, 0xa9, // stp  x16, x30, [sp,#-16]!
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[2]))
      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.plt.got[2]))]
      0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.plt.got[2]))
      0x20, 0x02, 0x1f, 0xd6, // br   x17
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5  // nop
  };
  memcpy(Buf, PltData, sizeof(PltData));

  uint64_t Got = Out<ELF64LE>::GotPlt->getVA();
  uint64_t Plt = Out<ELF64LE>::Plt->getVA();
  // The adrp at Buf + 4 gets the page delta; the ldr/add get the page offset.
  relocateOne(Buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
              getAArch64Page(Got + 16) - getAArch64Page(Plt + 4));
  relocateOne(Buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, Got + 16);
  relocateOne(Buf + 12, R_AARCH64_ADD_ABS_LO12_NC, Got + 16);
}
1225 
// Writes one 16-byte PLT entry: adrp/ldr/add x16 to the symbol's .got.plt
// slot, then branch through x17. The immediates are patched via relocateOne.
void AArch64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                 uint64_t PltEntryAddr, int32_t Index,
                                 unsigned RelOff) const {
  const uint8_t Inst[] = {
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[n]))
      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.plt.got[n]))]
      0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.plt.got[n]))
      0x20, 0x02, 0x1f, 0xd6  // br   x17
  };
  memcpy(Buf, Inst, sizeof(Inst));

  relocateOne(Buf, R_AARCH64_ADR_PREL_PG_HI21,
              getAArch64Page(GotEntryAddr) - getAArch64Page(PltEntryAddr));
  relocateOne(Buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, GotEntryAddr);
  relocateOne(Buf + 8, R_AARCH64_ADD_ABS_LO12_NC, GotEntryAddr);
}
1242 
1243 static void updateAArch64Addr(uint8_t *L, uint64_t Imm) {
1244   uint32_t ImmLo = (Imm & 0x3) << 29;
1245   uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
1246   uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
1247   write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
1248 }
1249 
// Patches the 12-bit immediate field (bits [21:10]) of an ADD instruction,
// assuming those bits are currently zero.
static inline void updateAArch64Add(uint8_t *L, uint64_t Imm) {
  or32le(L, (Imm & 0xFFF) << 10);
}
1253 
// Applies an AArch64 relocation at Loc with value Val. Each LDST*_LO12 case
// shifts the page offset into the instruction's scaled immediate field;
// unsupported types are a fatal error.
void AArch64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                    uint64_t Val) const {
  switch (Type) {
  case R_AARCH64_ABS16:
  case R_AARCH64_PREL16:
    checkIntUInt<16>(Val, Type);
    write16le(Loc, Val);
    break;
  case R_AARCH64_ABS32:
  case R_AARCH64_PREL32:
    checkIntUInt<32>(Val, Type);
    write32le(Loc, Val);
    break;
  case R_AARCH64_ABS64:
  case R_AARCH64_PREL64:
    write64le(Loc, Val);
    break;
  case R_AARCH64_ADD_ABS_LO12_NC:
    // This relocation stores 12 bits and there's no instruction
    // to do it. Instead, we do a 32 bits store of the value
    // of r_addend bitwise-or'ed Loc. This assumes that the addend
    // bits in Loc are zero.
    or32le(Loc, (Val & 0xFFF) << 10);
    break;
  case R_AARCH64_ADR_GOT_PAGE:
  case R_AARCH64_ADR_PREL_PG_HI21:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    // ADRP: the encoded immediate is the page delta (Val >> 12).
    checkInt<33>(Val, Type);
    updateAArch64Addr(Loc, Val >> 12);
    break;
  case R_AARCH64_ADR_PREL_LO21:
    checkInt<21>(Val, Type);
    updateAArch64Addr(Loc, Val);
    break;
  case R_AARCH64_CALL26:
  case R_AARCH64_JUMP26:
    // 26-bit branch: word-aligned displacement stored >> 2.
    checkInt<28>(Val, Type);
    or32le(Loc, (Val & 0x0FFFFFFC) >> 2);
    break;
  case R_AARCH64_CONDBR19:
    checkInt<21>(Val, Type);
    or32le(Loc, (Val & 0x1FFFFC) << 3);
    break;
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
  case R_AARCH64_TLSDESC_LD64_LO12_NC:
    // 64-bit load: the immediate is scaled by 8, so Val must be 8-aligned.
    checkAlignment<8>(Val, Type);
    or32le(Loc, (Val & 0xFF8) << 7);
    break;
  case R_AARCH64_LDST128_ABS_LO12_NC:
    or32le(Loc, (Val & 0x0FF8) << 6);
    break;
  case R_AARCH64_LDST16_ABS_LO12_NC:
    or32le(Loc, (Val & 0x0FFC) << 9);
    break;
  case R_AARCH64_LDST8_ABS_LO12_NC:
    or32le(Loc, (Val & 0xFFF) << 10);
    break;
  case R_AARCH64_LDST32_ABS_LO12_NC:
    or32le(Loc, (Val & 0xFFC) << 8);
    break;
  case R_AARCH64_LDST64_ABS_LO12_NC:
    or32le(Loc, (Val & 0xFF8) << 7);
    break;
  case R_AARCH64_TSTBR14:
    checkInt<16>(Val, Type);
    or32le(Loc, (Val & 0xFFFC) << 3);
    break;
  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
    checkInt<24>(Val, Type);
    updateAArch64Add(Loc, Val >> 12);
    break;
  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
  case R_AARCH64_TLSDESC_ADD_LO12_NC:
    updateAArch64Add(Loc, Val);
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}
1335 
// Rewrites one instruction of a TLSDESC Global-Dynamic sequence for the
// Local-Exec model. Val here is the TP-relative offset, materialized directly
// with a movz/movk pair.
void AArch64TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                       uint64_t Val) const {
  // TLSDESC Global-Dynamic relocation are in the form:
  //   adrp    x0, :tlsdesc:v             [R_AARCH64_TLSDESC_ADR_PAGE21]
  //   ldr     x1, [x0, #:tlsdesc_lo12:v] [R_AARCH64_TLSDESC_LD64_LO12_NC]
  //   add     x0, x0, :tlsdesc_lo12:v    [R_AARCH64_TLSDESC_ADD_LO12_NC]
  //   .tlsdesccall                       [R_AARCH64_TLSDESC_CALL]
  //   blr     x1
  // And it can be optimized to:
  //   movz    x0, #0x0, lsl #16
  //   movk    x0, #0x10
  //   nop
  //   nop
  checkUInt<32>(Val, Type);

  switch (Type) {
  case R_AARCH64_TLSDESC_ADD_LO12_NC:
  case R_AARCH64_TLSDESC_CALL:
    write32le(Loc, 0xd503201f); // nop
    return;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    write32le(Loc, 0xd2a00000 | (((Val >> 16) & 0xffff) << 5)); // movz
    return;
  case R_AARCH64_TLSDESC_LD64_LO12_NC:
    write32le(Loc, 0xf2800000 | ((Val & 0xffff) << 5)); // movk
    return;
  default:
    llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
  }
}
1366 
1367 void AArch64TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
1368                                        uint64_t Val) const {
1369   // TLSDESC Global-Dynamic relocation are in the form:
1370   //   adrp    x0, :tlsdesc:v             [R_AARCH64_TLSDESC_ADR_PAGE21]
1371   //   ldr     x1, [x0, #:tlsdesc_lo12:v  [R_AARCH64_TLSDESC_LD64_LO12_NC]
1372   //   add     x0, x0, :tlsdesc_los:v     [_AARCH64_TLSDESC_ADD_LO12_NC]
1373   //   .tlsdesccall                       [R_AARCH64_TLSDESC_CALL]
1374   //   blr     x1
1375   // And it can optimized to:
1376   //   adrp    x0, :gottprel:v
1377   //   ldr     x0, [x0, :gottprel_lo12:v]
1378   //   nop
1379   //   nop
1380 
1381   switch (Type) {
1382   case R_AARCH64_TLSDESC_ADD_LO12_NC:
1383   case R_AARCH64_TLSDESC_CALL:
1384     write32le(Loc, 0xd503201f); // nop
1385     break;
1386   case R_AARCH64_TLSDESC_ADR_PAGE21:
1387     write32le(Loc, 0x90000000); // adrp
1388     relocateOne(Loc, R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, Val);
1389     break;
1390   case R_AARCH64_TLSDESC_LD64_LO12_NC:
1391     write32le(Loc, 0xf9400000); // ldr
1392     relocateOne(Loc, R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, Val);
1393     break;
1394   default:
1395     llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
1396   }
1397 }
1398 
1399 void AArch64TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
1400                                        uint64_t Val) const {
1401   checkUInt<32>(Val, Type);
1402 
1403   if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
1404     // Generate MOVZ.
1405     uint32_t RegNo = read32le(Loc) & 0x1f;
1406     write32le(Loc, (0xd2a00000 | RegNo) | (((Val >> 16) & 0xffff) << 5));
1407     return;
1408   }
1409   if (Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) {
1410     // Generate MOVK.
1411     uint32_t RegNo = read32le(Loc) & 0x1f;
1412     write32le(Loc, (0xf2800000 | RegNo) | ((Val & 0xffff) << 5));
1413     return;
1414   }
1415   llvm_unreachable("invalid relocation for TLS IE to LE relaxation");
1416 }
1417 
// AMDGPU supports only the 32-bit PC-relative relocation so far.
void AMDGPUTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  assert(Type == R_AMDGPU_REL32);
  write32le(Loc, Val);
}
1423 
// Only R_AMDGPU_REL32 is supported; other types report an error but are still
// classified as PC-relative so processing can continue.
RelExpr AMDGPUTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
  if (Type != R_AMDGPU_REL32)
    error("do not know how to handle relocation");
  return R_PC;
}
1429 
// Dynamic relocation types and PLT geometry for 32-bit ARM.
ARMTargetInfo::ARMTargetInfo() {
  CopyRel = R_ARM_COPY;
  RelativeRel = R_ARM_RELATIVE;
  IRelativeRel = R_ARM_IRELATIVE;
  GotRel = R_ARM_GLOB_DAT;
  PltRel = R_ARM_JUMP_SLOT;
  TlsGotRel = R_ARM_TLS_TPOFF32;
  TlsModuleIndexRel = R_ARM_TLS_DTPMOD32;
  TlsOffsetRel = R_ARM_TLS_DTPOFF32;
  // Four words per entry; the header has a fifth word holding the .got.plt
  // displacement (see writePlt / writePltHeader below).
  PltEntrySize = 16;
  PltHeaderSize = 20;
}
1442 
1443 RelExpr ARMTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
1444   switch (Type) {
1445   default:
1446     return R_ABS;
1447   case R_ARM_THM_JUMP11:
1448     return R_PC;
1449   case R_ARM_CALL:
1450   case R_ARM_JUMP24:
1451   case R_ARM_PC24:
1452   case R_ARM_PLT32:
1453   case R_ARM_THM_JUMP19:
1454   case R_ARM_THM_JUMP24:
1455   case R_ARM_THM_CALL:
1456     return R_PLT_PC;
1457   case R_ARM_GOTOFF32:
1458     // (S + A) - GOT_ORG
1459     return R_GOTREL;
1460   case R_ARM_GOT_BREL:
1461     // GOT(S) + A - GOT_ORG
1462     return R_GOT_OFF;
1463   case R_ARM_GOT_PREL:
1464     // GOT(S) + - GOT_ORG
1465     return R_GOT_PC;
1466   case R_ARM_BASE_PREL:
1467     // B(S) + A - P
1468     // FIXME: currently B(S) assumed to be .got, this may not hold for all
1469     // platforms.
1470     return R_GOTONLY_PC;
1471   case R_ARM_PREL31:
1472   case R_ARM_REL32:
1473     return R_PC;
1474   }
1475 }
1476 
1477 uint32_t ARMTargetInfo::getDynRel(uint32_t Type) const {
1478   if (Type == R_ARM_ABS32)
1479     return Type;
1480   // Keep it going with a dummy value so that we can find more reloc errors.
1481   errorDynRel(Type);
1482   return R_ARM_ABS32;
1483 }
1484 
// Initial .got.plt entries point at the start of the PLT.
void ARMTargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
  write32le(Buf, Out<ELF32LE>::Plt->getVA());
}
1488 
// Writes the 20-byte PLT header. L2 holds the PC-relative displacement of
// .got.plt (patched below); the code computes &.got.plt into lr and jumps to
// the address stored 8 bytes into it.
void ARMTargetInfo::writePltHeader(uint8_t *Buf) const {
  const uint8_t PltData[] = {
      0x04, 0xe0, 0x2d, 0xe5, //     str lr, [sp,#-4]!
      0x04, 0xe0, 0x9f, 0xe5, //     ldr lr, L2
      0x0e, 0xe0, 0x8f, 0xe0, // L1: add lr, pc, lr
      0x08, 0xf0, 0xbe, 0xe5, //     ldr pc, [lr, #8]
      0x00, 0x00, 0x00, 0x00, // L2: .word   &(.got.plt) - L1 - 8
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint64_t GotPlt = Out<ELF32LE>::GotPlt->getVA();
  uint64_t L1 = Out<ELF32LE>::Plt->getVA() + 8;
  // The -8 accounts for the ARM PC reading two instructions ahead at L1.
  write32le(Buf + 16, GotPlt - L1 - 8);
}
1502 
// Writes one 16-byte PLT entry: load the PC-relative displacement of the
// symbol's .got.plt slot from L2, form its address in ip, and jump through it.
void ARMTargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                             uint64_t PltEntryAddr, int32_t Index,
                             unsigned RelOff) const {
  // FIXME: Using simple code sequence with simple relocations.
  // There is a more optimal sequence but it requires support for the group
  // relocations. See ELF for the ARM Architecture Appendix A.3
  const uint8_t PltData[] = {
      0x04, 0xc0, 0x9f, 0xe5, //     ldr ip, L2
      0x0f, 0xc0, 0x8c, 0xe0, // L1: add ip, ip, pc
      0x00, 0xf0, 0x9c, 0xe5, //     ldr pc, [ip]
      0x00, 0x00, 0x00, 0x00, // L2: .word   Offset(&(.plt.got) - L1 - 8
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint64_t L1 = PltEntryAddr + 4;
  // The -8 accounts for the ARM PC reading two instructions ahead at L1.
  write32le(Buf + 12, GotEntryAddr - L1 - 8);
}
1519 
// Applies an ARM/Thumb relocation at Loc with value Val. Note that several
// cases deliberately fall through: BL shares its encoding with B, and the
// Thumb BL/BLX rest-of-encoding is shared with B.W.
void ARMTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  switch (Type) {
  case R_ARM_NONE:
    break;
  case R_ARM_ABS32:
  case R_ARM_BASE_PREL:
  case R_ARM_GOTOFF32:
  case R_ARM_GOT_BREL:
  case R_ARM_GOT_PREL:
  case R_ARM_REL32:
    write32le(Loc, Val);
    break;
  case R_ARM_PREL31:
    // Bit 31 of the word is preserved.
    checkInt<31>(Val, Type);
    write32le(Loc, (read32le(Loc) & 0x80000000) | (Val & ~0x80000000));
    break;
  case R_ARM_CALL:
    // R_ARM_CALL is used for BL and BLX instructions, depending on the
    // value of bit 0 of Val, we must select a BL or BLX instruction
    if (Val & 1) {
      // If bit 0 of Val is 1 the target is Thumb, we must select a BLX.
      // The BLX encoding is 0xfa:H:imm24 where Val = imm24:H:'1'
      checkInt<26>(Val, Type);
      write32le(Loc, 0xfa000000 |                    // opcode
                         ((Val & 2) << 23) |         // H
                         ((Val >> 2) & 0x00ffffff)); // imm24
      break;
    }
    if ((read32le(Loc) & 0xfe000000) == 0xfa000000)
      // BLX (always unconditional) instruction to an ARM Target, select an
      // unconditional BL.
      write32le(Loc, 0xeb000000 | (read32le(Loc) & 0x00ffffff));
    // fall through as BL encoding is shared with B
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
    checkInt<26>(Val, Type);
    write32le(Loc, (read32le(Loc) & ~0x00ffffff) | ((Val >> 2) & 0x00ffffff));
    break;
  case R_ARM_THM_JUMP11:
    checkInt<12>(Val, Type);
    write16le(Loc, (read32le(Loc) & 0xf800) | ((Val >> 1) & 0x07ff));
    break;
  case R_ARM_THM_JUMP19:
    // Encoding T3: Val = S:J2:J1:imm6:imm11:0
    checkInt<21>(Val, Type);
    write16le(Loc,
              (read16le(Loc) & 0xfbc0) |   // opcode cond
                  ((Val >> 10) & 0x0400) | // S
                  ((Val >> 12) & 0x003f)); // imm6
    write16le(Loc + 2,
              0x8000 |                    // opcode
                  ((Val >> 8) & 0x0800) | // J2
                  ((Val >> 5) & 0x2000) | // J1
                  ((Val >> 1) & 0x07ff)); // imm11
    break;
  case R_ARM_THM_CALL:
    // R_ARM_THM_CALL is used for BL and BLX instructions, depending on the
    // value of bit 0 of Val, we must select a BL or BLX instruction
    if ((Val & 1) == 0) {
      // Ensure BLX destination is 4-byte aligned. As BLX instruction may
      // only be two byte aligned. This must be done before overflow check
      Val = alignTo(Val, 4);
    }
    // Bit 12 is 0 for BLX, 1 for BL
    write16le(Loc + 2, (read16le(Loc + 2) & ~0x1000) | (Val & 1) << 12);
    // Fall through as rest of encoding is the same as B.W
  case R_ARM_THM_JUMP24:
    // Encoding B  T4, BL T1, BLX T2: Val = S:I1:I2:imm10:imm11:0
    // FIXME: Use of I1 and I2 require v6T2ops
    checkInt<25>(Val, Type);
    write16le(Loc,
              0xf000 |                     // opcode
                  ((Val >> 14) & 0x0400) | // S
                  ((Val >> 12) & 0x03ff)); // imm10
    write16le(Loc + 2,
              (read16le(Loc + 2) & 0xd000) |                  // opcode
                  (((~(Val >> 10)) ^ (Val >> 11)) & 0x2000) | // J1
                  (((~(Val >> 11)) ^ (Val >> 13)) & 0x0800) | // J2
                  ((Val >> 1) & 0x07ff));                     // imm11
    break;
  case R_ARM_MOVW_ABS_NC:
    // MOVW encoding: imm4 in bits [19:16], imm12 in bits [11:0].
    write32le(Loc, (read32le(Loc) & ~0x000f0fff) | ((Val & 0xf000) << 4) |
                       (Val & 0x0fff));
    break;
  case R_ARM_MOVT_ABS:
    // Same field layout as MOVW, but carrying the high half of Val.
    checkUInt<32>(Val, Type);
    write32le(Loc, (read32le(Loc) & ~0x000f0fff) |
                       (((Val >> 16) & 0xf000) << 4) | ((Val >> 16) & 0xfff));
    break;
  case R_ARM_THM_MOVT_ABS:
    // Encoding T1: A = imm4:i:imm3:imm8
    checkUInt<32>(Val, Type);
    write16le(Loc,
              0xf2c0 |                     // opcode
                  ((Val >> 17) & 0x0400) | // i
                  ((Val >> 28) & 0x000f)); // imm4
    write16le(Loc + 2,
              (read16le(Loc + 2) & 0x8f00) | // opcode
                  ((Val >> 12) & 0x7000) |   // imm3
                  ((Val >> 16) & 0x00ff));   // imm8
    break;
  case R_ARM_THM_MOVW_ABS_NC:
    // Encoding T3: A = imm4:i:imm3:imm8
    write16le(Loc,
              0xf240 |                     // opcode
                  ((Val >> 1) & 0x0400) |  // i
                  ((Val >> 12) & 0x000f)); // imm4
    write16le(Loc + 2,
              (read16le(Loc + 2) & 0x8f00) | // opcode
                  ((Val << 4) & 0x7000) |    // imm3
                  (Val & 0x00ff));           // imm8
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}
1638 
// Extract the addend encoded in the section contents at Buf for a
// REL-format relocation of the given Type. Types whose addend is not
// stored in the instruction return 0.
uint64_t ARMTargetInfo::getImplicitAddend(const uint8_t *Buf,
                                          uint32_t Type) const {
  switch (Type) {
  default:
    return 0;
  case R_ARM_ABS32:
  case R_ARM_BASE_PREL:
  case R_ARM_GOTOFF32:
  case R_ARM_GOT_BREL:
  case R_ARM_GOT_PREL:
  case R_ARM_REL32:
    // Data relocations: the whole 32-bit word is the addend.
    return SignExtend64<32>(read32le(Buf));
  case R_ARM_PREL31:
    // Only the low 31 bits hold the offset; bit 31 is reused by the ABI.
    return SignExtend64<31>(read32le(Buf));
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
    // ARM branch: signed 24-bit immediate scaled by 4.
    return SignExtend64<26>(read32le(Buf) << 2);
  case R_ARM_THM_JUMP11:
    // Thumb B (encoding T2): signed 11-bit immediate scaled by 2.
    return SignExtend64<12>(read16le(Buf) << 1);
  case R_ARM_THM_JUMP19: {
    // Encoding T3: A = S:J2:J1:imm6:imm11:0
    // NOTE(review): the assembled value spans 21 bits (S lands at bit 20),
    // so SignExtend64<20> looks off by one — verify against AAELF.
    uint16_t Hi = read16le(Buf);
    uint16_t Lo = read16le(Buf + 2);
    return SignExtend64<20>(((Hi & 0x0400) << 10) | // S
                            ((Lo & 0x0800) << 8) |  // J2
                            ((Lo & 0x2000) << 5) |  // J1
                            ((Hi & 0x003f) << 12) | // imm6
                            ((Lo & 0x07ff) << 1));  // imm11:0
  }
  case R_ARM_THM_JUMP24:
  case R_ARM_THM_CALL: {
    // Encoding B T4, BL T1, BLX T2: A = S:I1:I2:imm10:imm11:0
    // I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S)
    // FIXME: I1 and I2 require v6T2ops
    uint16_t Hi = read16le(Buf);
    uint16_t Lo = read16le(Buf + 2);
    return SignExtend64<24>(((Hi & 0x0400) << 14) |                    // S
                            (~((Lo ^ (Hi << 3)) << 10) & 0x00800000) | // I1
                            (~((Lo ^ (Hi << 1)) << 11) & 0x00400000) | // I2
                            ((Hi & 0x003ff) << 12) |                   // imm10
                            ((Lo & 0x007ff) << 1)); // imm11:0
  }
  // ELF for the ARM Architecture 4.6.1.1 the implicit addend for MOVW and
  // MOVT is in the range -32768 <= A < 32768
  case R_ARM_MOVW_ABS_NC:
  case R_ARM_MOVT_ABS: {
    // ARM MOVW/MOVT: A = imm4:imm12, sign-extended from 16 bits.
    uint64_t Val = read32le(Buf) & 0x000f0fff;
    return SignExtend64<16>(((Val & 0x000f0000) >> 4) | (Val & 0x00fff));
  }
  case R_ARM_THM_MOVW_ABS_NC:
  case R_ARM_THM_MOVT_ABS: {
    // Encoding T3: A = imm4:i:imm3:imm8
    uint16_t Hi = read16le(Buf);
    uint16_t Lo = read16le(Buf + 2);
    return SignExtend64<16>(((Hi & 0x000f) << 12) | // imm4
                            ((Hi & 0x0400) << 1) |  // i
                            ((Lo & 0x7000) >> 4) |  // imm3
                            (Lo & 0x00ff));         // imm8
  }
  }
}
1702 
1703 template <class ELFT> MipsTargetInfo<ELFT>::MipsTargetInfo() {
1704   GotPltHeaderEntriesNum = 2;
1705   PageSize = 65536;
1706   PltEntrySize = 16;
1707   PltHeaderSize = 32;
1708   ThunkSize = 16;
1709   CopyRel = R_MIPS_COPY;
1710   PltRel = R_MIPS_JUMP_SLOT;
1711   if (ELFT::Is64Bits) {
1712     RelativeRel = (R_MIPS_64 << 8) | R_MIPS_REL32;
1713     TlsGotRel = R_MIPS_TLS_TPREL64;
1714     TlsModuleIndexRel = R_MIPS_TLS_DTPMOD64;
1715     TlsOffsetRel = R_MIPS_TLS_DTPREL64;
1716   } else {
1717     RelativeRel = R_MIPS_REL32;
1718     TlsGotRel = R_MIPS_TLS_TPREL32;
1719     TlsModuleIndexRel = R_MIPS_TLS_DTPMOD32;
1720     TlsOffsetRel = R_MIPS_TLS_DTPREL32;
1721   }
1722 }
1723 
// Classify relocation Type against symbol S: the returned RelExpr tells
// the generic relocation scanner how the final value is computed
// (absolute, PC-relative, GOT-relative, etc.).
template <class ELFT>
RelExpr MipsTargetInfo<ELFT>::getRelExpr(uint32_t Type,
                                         const SymbolBody &S) const {
  if (ELFT::Is64Bits)
    // See comment in the calculateMips64RelChain.
    Type &= 0xff;
  switch (Type) {
  default:
    return R_ABS;
  case R_MIPS_JALR:
    // Optimization hint only; does not affect the computed value.
    return R_HINT;
  case R_MIPS_GPREL16:
  case R_MIPS_GPREL32:
    return R_GOTREL;
  case R_MIPS_26:
    return R_PLT;
  case R_MIPS_HI16:
  case R_MIPS_LO16:
  case R_MIPS_GOT_OFST:
    // MIPS _gp_disp designates offset between start of function and 'gp'
    // pointer into GOT. __gnu_local_gp is equal to the current value of
    // the 'gp'. Therefore any relocations against them do not require
    // dynamic relocation.
    if (&S == ElfSym<ELFT>::MipsGpDisp)
      return R_PC;
    return R_ABS;
  case R_MIPS_PC32:
  case R_MIPS_PC16:
  case R_MIPS_PC19_S2:
  case R_MIPS_PC21_S2:
  case R_MIPS_PC26_S2:
  case R_MIPS_PCHI16:
  case R_MIPS_PCLO16:
    return R_PC;
  case R_MIPS_GOT16:
    // GOT16 against a local symbol addresses the GOT page of the symbol;
    // against a global one it behaves like CALL16 below.
    if (S.isLocal())
      return R_MIPS_GOT_LOCAL_PAGE;
  // fallthrough
  case R_MIPS_CALL16:
  case R_MIPS_GOT_DISP:
  case R_MIPS_TLS_GOTTPREL:
    return R_MIPS_GOT_OFF;
  case R_MIPS_GOT_PAGE:
    return R_MIPS_GOT_LOCAL_PAGE;
  case R_MIPS_TLS_GD:
    return R_MIPS_TLSGD;
  case R_MIPS_TLS_LDM:
    return R_MIPS_TLSLD;
  }
}
1774 
1775 template <class ELFT>
1776 uint32_t MipsTargetInfo<ELFT>::getDynRel(uint32_t Type) const {
1777   if (Type == R_MIPS_32 || Type == R_MIPS_64)
1778     return RelativeRel;
1779   // Keep it going with a dummy value so that we can find more reloc errors.
1780   errorDynRel(Type);
1781   return R_MIPS_32;
1782 }
1783 
1784 template <class ELFT>
1785 bool MipsTargetInfo<ELFT>::isTlsLocalDynamicRel(uint32_t Type) const {
1786   return Type == R_MIPS_TLS_LDM;
1787 }
1788 
1789 template <class ELFT>
1790 bool MipsTargetInfo<ELFT>::isTlsGlobalDynamicRel(uint32_t Type) const {
1791   return Type == R_MIPS_TLS_GD;
1792 }
1793 
// Write the initial value of a .got.plt entry. Every entry starts out
// pointing at the beginning of the PLT, so the first call through it
// runs the PLT header code (see writePltHeader).
template <class ELFT>
void MipsTargetInfo<ELFT>::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
  write32<ELFT::TargetEndianness>(Buf, Out<ELFT>::Plt->getVA());
}
1798 
// Compute the %hi(V) half of a HI16/LO16 relocation pair: the upper 16
// bits of V, pre-adjusted by 0x8000 so that adding the sign-extended
// %lo(V) half back reconstructs V exactly.
static uint16_t mipsHigh(uint64_t V) {
  uint64_t Adjusted = V + 0x8000;
  return static_cast<uint16_t>(Adjusted >> 16);
}
1800 
1801 template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
1802 static int64_t getPcRelocAddend(const uint8_t *Loc) {
1803   uint32_t Instr = read32<E>(Loc);
1804   uint32_t Mask = 0xffffffff >> (32 - BSIZE);
1805   return SignExtend64<BSIZE + SHIFT>((Instr & Mask) << SHIFT);
1806 }
1807 
1808 template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
1809 static void applyMipsPcReloc(uint8_t *Loc, uint32_t Type, uint64_t V) {
1810   uint32_t Mask = 0xffffffff >> (32 - BSIZE);
1811   uint32_t Instr = read32<E>(Loc);
1812   if (SHIFT > 0)
1813     checkAlignment<(1 << SHIFT)>(V, Type);
1814   checkInt<BSIZE + SHIFT>(V, Type);
1815   write32<E>(Loc, (Instr & ~Mask) | ((V >> SHIFT) & Mask));
1816 }
1817 
1818 template <endianness E>
1819 static void writeMipsHi16(uint8_t *Loc, uint64_t V) {
1820   uint32_t Instr = read32<E>(Loc);
1821   write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(V));
1822 }
1823 
1824 template <endianness E>
1825 static void writeMipsLo16(uint8_t *Loc, uint64_t V) {
1826   uint32_t Instr = read32<E>(Loc);
1827   write32<E>(Loc, (Instr & 0xffff0000) | (V & 0xffff));
1828 }
1829 
// Write the 32-byte MIPS PLT header. It loads the word stored at
// .got.plt[0] (conventionally the dynamic linker's lazy resolver) into
// $25, computes the index of the calling PLT entry in $24, and jumps
// to the loaded address.
template <class ELFT>
void MipsTargetInfo<ELFT>::writePltHeader(uint8_t *Buf) const {
  const endianness E = ELFT::TargetEndianness;
  write32<E>(Buf, 0x3c1c0000);      // lui   $28, %hi(&GOTPLT[0])
  write32<E>(Buf + 4, 0x8f990000);  // lw    $25, %lo(&GOTPLT[0])($28)
  write32<E>(Buf + 8, 0x279c0000);  // addiu $28, $28, %lo(&GOTPLT[0])
  write32<E>(Buf + 12, 0x031cc023); // subu  $24, $24, $28
  write32<E>(Buf + 16, 0x03e07825); // move  $15, $31
  write32<E>(Buf + 20, 0x0018c082); // srl   $24, $24, 2
  write32<E>(Buf + 24, 0x0320f809); // jalr  $25
  write32<E>(Buf + 28, 0x2718fffe); // subu  $24, $24, 2
  // Patch the %hi/%lo halves of &GOTPLT[0] into the first three
  // instructions above.
  uint64_t Got = Out<ELFT>::GotPlt->getVA();
  writeMipsHi16<E>(Buf, Got);
  writeMipsLo16<E>(Buf + 4, Got);
  writeMipsLo16<E>(Buf + 8, Got);
}
1846 
// Write a 16-byte MIPS PLT entry: load the target address from its
// .got.plt slot into $25 and jump to it; $24 is left pointing at the
// slot for the lazy resolver.
template <class ELFT>
void MipsTargetInfo<ELFT>::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                    uint64_t PltEntryAddr, int32_t Index,
                                    unsigned RelOff) const {
  const endianness E = ELFT::TargetEndianness;
  write32<E>(Buf, 0x3c0f0000);      // lui   $15, %hi(.got.plt entry)
  write32<E>(Buf + 4, 0x8df90000);  // l[wd] $25, %lo(.got.plt entry)($15)
  write32<E>(Buf + 8, 0x03200008);  // jr    $25
  write32<E>(Buf + 12, 0x25f80000); // addiu $24, $15, %lo(.got.plt entry)
  // Patch the %hi/%lo halves of the .got.plt slot address.
  writeMipsHi16<E>(Buf, GotEntryAddr);
  writeMipsLo16<E>(Buf + 4, GotEntryAddr);
  writeMipsLo16<E>(Buf + 12, GotEntryAddr);
}
1860 
// Write a 16-byte MIPS LA25 thunk that calls a PIC function from
// non-PIC code: it loads the target address S into $25 ($t9) — which
// the MIPS PIC calling convention requires to hold the callee's own
// address on entry — and jumps to S. See needsThunk for when this is
// emitted.
template <class ELFT>
void MipsTargetInfo<ELFT>::writeThunk(uint8_t *Buf, uint64_t S) const {
  const endianness E = ELFT::TargetEndianness;
  write32<E>(Buf, 0x3c190000);                // lui   $25, %hi(func)
  // NOTE(review): (S >> 2) is OR'ed in without masking to the 26-bit
  // immediate field; assumes S stays below 0x10000000 so the 'j'
  // opcode bits cannot be clobbered — verify.
  write32<E>(Buf + 4, 0x08000000 | (S >> 2)); // j     func
  write32<E>(Buf + 8, 0x27390000);            // addiu $25, $25, %lo(func)
  write32<E>(Buf + 12, 0x00000000);           // nop
  writeMipsHi16<E>(Buf, S);
  writeMipsLo16<E>(Buf + 8, S);
}
1873 
// Return true if a call to symbol S from file File via relocation Type
// needs an LA25 stub (see writeThunk).
template <class ELFT>
bool MipsTargetInfo<ELFT>::needsThunk(uint32_t Type, const InputFile &File,
                                      const SymbolBody &S) const {
  // Any MIPS PIC code function is invoked with its address in register $t9.
  // So if we have a branch instruction from non-PIC code to the PIC one
  // we cannot make the jump directly and need to create a small stubs
  // to save the target function address.
  // See page 3-38 ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
  if (Type != R_MIPS_26)
    return false;
  auto *F = dyn_cast<ELFFileBase<ELFT>>(&File);
  if (!F)
    return false;
  // If current file has PIC code, LA25 stub is not required.
  if (F->getObj().getHeader()->e_flags & EF_MIPS_PIC)
    return false;
  // Only branches to defined symbols with a known section can need a stub.
  auto *D = dyn_cast<DefinedRegular<ELFT>>(&S);
  if (!D || !D->Section)
    return false;
  // LA25 is required if target file has PIC code
  // or target symbol is a PIC symbol.
  // NOTE(review): STO_MIPS_MIPS16 appears to be used as a mask over the
  // MIPS-specific st_other flag bits here — confirm against the psABI.
  return (D->Section->getFile()->getObj().getHeader()->e_flags & EF_MIPS_PIC) ||
         (D->StOther & STO_MIPS_MIPS16) == STO_MIPS_PIC;
}
1898 
// Extract the addend stored in the section contents at Buf for a
// REL-format MIPS relocation of the given Type. Types whose addend is
// not encoded in the instruction return 0.
template <class ELFT>
uint64_t MipsTargetInfo<ELFT>::getImplicitAddend(const uint8_t *Buf,
                                                 uint32_t Type) const {
  const endianness E = ELFT::TargetEndianness;
  switch (Type) {
  default:
    return 0;
  case R_MIPS_32:
  case R_MIPS_GPREL32:
    // The whole 32-bit word is the addend.
    return read32<E>(Buf);
  case R_MIPS_26:
    // FIXME (simon): If the relocation target symbol is not a PLT entry
    // we should use another expression for calculation:
    // ((A << 2) | (P & 0xf0000000)) >> 2
    return SignExtend64<28>(read32<E>(Buf) << 2);
  case R_MIPS_GPREL16:
  case R_MIPS_LO16:
  case R_MIPS_PCLO16:
  case R_MIPS_TLS_DTPREL_HI16:
  case R_MIPS_TLS_DTPREL_LO16:
  case R_MIPS_TLS_TPREL_HI16:
  case R_MIPS_TLS_TPREL_LO16:
    // 16-bit immediate in the low half of the instruction word.
    return SignExtend64<16>(read32<E>(Buf));
  // PC-relative branch offsets of various field widths, scaled by 4
  // (except the full-word R_MIPS_PC32).
  case R_MIPS_PC16:
    return getPcRelocAddend<E, 16, 2>(Buf);
  case R_MIPS_PC19_S2:
    return getPcRelocAddend<E, 19, 2>(Buf);
  case R_MIPS_PC21_S2:
    return getPcRelocAddend<E, 21, 2>(Buf);
  case R_MIPS_PC26_S2:
    return getPcRelocAddend<E, 26, 2>(Buf);
  case R_MIPS_PC32:
    return getPcRelocAddend<E, 32, 0>(Buf);
  }
}
1934 
1935 static std::pair<uint32_t, uint64_t> calculateMips64RelChain(uint32_t Type,
1936                                                              uint64_t Val) {
1937   // MIPS N64 ABI packs multiple relocations into the single relocation
1938   // record. In general, all up to three relocations can have arbitrary
1939   // types. In fact, Clang and GCC uses only a few combinations. For now,
1940   // we support two of them. That is allow to pass at least all LLVM
1941   // test suite cases.
1942   // <any relocation> / R_MIPS_SUB / R_MIPS_HI16 | R_MIPS_LO16
1943   // <any relocation> / R_MIPS_64 / R_MIPS_NONE
1944   // The first relocation is a 'real' relocation which is calculated
1945   // using the corresponding symbol's value. The second and the third
1946   // relocations used to modify result of the first one: extend it to
1947   // 64-bit, extract high or low part etc. For details, see part 2.9 Relocation
1948   // at the https://dmz-portal.mips.com/mw/images/8/82/007-4658-001.pdf
1949   uint32_t Type2 = (Type >> 8) & 0xff;
1950   uint32_t Type3 = (Type >> 16) & 0xff;
1951   if (Type2 == R_MIPS_NONE && Type3 == R_MIPS_NONE)
1952     return std::make_pair(Type, Val);
1953   if (Type2 == R_MIPS_64 && Type3 == R_MIPS_NONE)
1954     return std::make_pair(Type2, Val);
1955   if (Type2 == R_MIPS_SUB && (Type3 == R_MIPS_HI16 || Type3 == R_MIPS_LO16))
1956     return std::make_pair(Type3, -Val);
1957   error("unsupported relocations combination " + Twine(Type));
1958   return std::make_pair(Type & 0xff, Val);
1959 }
1960 
// Apply relocation Type at location Loc using the already-computed
// value Val. For 64-bit targets the packed N64 relocation chain is
// unpacked first (see calculateMips64RelChain).
template <class ELFT>
void MipsTargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint32_t Type,
                                       uint64_t Val) const {
  const endianness E = ELFT::TargetEndianness;
  // Thread pointer and DRP offsets from the start of TLS data area.
  // https://www.linux-mips.org/wiki/NPTL
  if (Type == R_MIPS_TLS_DTPREL_HI16 || Type == R_MIPS_TLS_DTPREL_LO16)
    Val -= 0x8000;
  else if (Type == R_MIPS_TLS_TPREL_HI16 || Type == R_MIPS_TLS_TPREL_LO16)
    Val -= 0x7000;
  if (ELFT::Is64Bits)
    std::tie(Type, Val) = calculateMips64RelChain(Type, Val);
  switch (Type) {
  case R_MIPS_32:
  case R_MIPS_GPREL32:
    write32<E>(Loc, Val);
    break;
  case R_MIPS_64:
    write64<E>(Loc, Val);
    break;
  case R_MIPS_26:
    // Low 26 bits of the word-aligned jump target; the high bits come
    // from the PC at run time.
    write32<E>(Loc, (read32<E>(Loc) & ~0x3ffffff) | (Val >> 2));
    break;
  // The following group must fit a signed 16-bit field, so the range is
  // checked before falling through to the common %lo-style store.
  case R_MIPS_GOT_DISP:
  case R_MIPS_GOT_PAGE:
  case R_MIPS_GOT16:
  case R_MIPS_GPREL16:
  case R_MIPS_TLS_GD:
  case R_MIPS_TLS_LDM:
    checkInt<16>(Val, Type);
  // fallthrough
  case R_MIPS_CALL16:
  case R_MIPS_GOT_OFST:
  case R_MIPS_LO16:
  case R_MIPS_PCLO16:
  case R_MIPS_TLS_DTPREL_LO16:
  case R_MIPS_TLS_GOTTPREL:
  case R_MIPS_TLS_TPREL_LO16:
    writeMipsLo16<E>(Loc, Val);
    break;
  case R_MIPS_HI16:
  case R_MIPS_PCHI16:
  case R_MIPS_TLS_DTPREL_HI16:
  case R_MIPS_TLS_TPREL_HI16:
    writeMipsHi16<E>(Loc, Val);
    break;
  case R_MIPS_JALR:
    // Ignore this optimization relocation for now
    break;
  case R_MIPS_PC16:
    applyMipsPcReloc<E, 16, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC19_S2:
    applyMipsPcReloc<E, 19, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC21_S2:
    applyMipsPcReloc<E, 21, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC26_S2:
    applyMipsPcReloc<E, 26, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC32:
    applyMipsPcReloc<E, 32, 0>(Loc, Type, Val);
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}
2029 
2030 template <class ELFT>
2031 bool MipsTargetInfo<ELFT>::usesOnlyLowPageBits(uint32_t Type) const {
2032   return Type == R_MIPS_LO16 || Type == R_MIPS_GOT_OFST;
2033 }
2034 }
2035 }
2036