xref: /llvm-project-15.0.7/lld/ELF/Target.cpp (revision 1168ca92)
1 //===- Target.cpp ---------------------------------------------------------===//
2 //
3 //                             The LLVM Linker
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Machine-specific things, such as applying relocations, creation of
11 // GOT or PLT entries, etc., are handled in this file.
12 //
// Refer to the ELF spec for the single-letter variables, S, A or P, used
14 // in this file.
15 //
// Some functions defined in this file have "relaxTls" as part of their names.
17 // They do peephole optimization for TLS variables by rewriting instructions.
// They are not part of the ABI but are optional optimizations, so you can skip
19 // them if you are not interested in how TLS variables are optimized.
20 // See the following paper for the details.
21 //
22 //   Ulrich Drepper, ELF Handling For Thread-Local Storage
23 //   http://www.akkadia.org/drepper/tls.pdf
24 //
25 //===----------------------------------------------------------------------===//
26 
27 #include "Target.h"
28 #include "Error.h"
29 #include "InputFiles.h"
30 #include "OutputSections.h"
31 #include "Symbols.h"
32 
33 #include "llvm/ADT/ArrayRef.h"
34 #include "llvm/Object/ELF.h"
35 #include "llvm/Support/Endian.h"
36 #include "llvm/Support/ELF.h"
37 
38 using namespace llvm;
39 using namespace llvm::object;
40 using namespace llvm::support::endian;
41 using namespace llvm::ELF;
42 
43 namespace lld {
44 namespace elf {
45 
46 TargetInfo *Target;
47 
48 static void or32le(uint8_t *P, int32_t V) { write32le(P, read32le(P) | V); }
49 
50 template <unsigned N> static void checkInt(int64_t V, uint32_t Type) {
51   if (isInt<N>(V))
52     return;
53   StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
54   error("relocation " + S + " out of range");
55 }
56 
57 template <unsigned N> static void checkUInt(uint64_t V, uint32_t Type) {
58   if (isUInt<N>(V))
59     return;
60   StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
61   error("relocation " + S + " out of range");
62 }
63 
64 template <unsigned N> static void checkIntUInt(uint64_t V, uint32_t Type) {
65   if (isInt<N>(V) || isUInt<N>(V))
66     return;
67   StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
68   error("relocation " + S + " out of range");
69 }
70 
71 template <unsigned N> static void checkAlignment(uint64_t V, uint32_t Type) {
72   if ((V & (N - 1)) == 0)
73     return;
74   StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
75   error("improper alignment for relocation " + S);
76 }
77 
namespace {
// Concrete per-architecture TargetInfo implementations. Each subclass
// overrides only the hooks its ABI needs; everything else falls back to the
// defaults defined on TargetInfo.

// x86-32 (i386). Implements lazy-binding PLT generation and the full set of
// TLS relaxations (GD->IE, GD->LE, IE->LE, LD->LE).
class X86TargetInfo final : public TargetInfo {
public:
  X86TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
  void writeGotPltHeader(uint8_t *Buf) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZero(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;

  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

// x86-64 (AMD64). Same hook set as x86-32.
class X86_64TargetInfo final : public TargetInfo {
public:
  X86_64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPltHeader(uint8_t *Buf) const override;
  void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZero(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;

  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

// 32-bit PowerPC. Only absolute relocations are supported here.
class PPCTargetInfo final : public TargetInfo {
public:
  PPCTargetInfo();
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
};

// 64-bit PowerPC (ELFv1, big-endian, TOC-based addressing).
class PPC64TargetInfo final : public TargetInfo {
public:
  PPC64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

// AArch64. Page-granular (ADRP-style) relocations are flagged via
// usesOnlyLowPageBits; supports GD->LE and IE->LE TLS relaxations.
class AArch64TargetInfo final : public TargetInfo {
public:
  AArch64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZero(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  bool usesOnlyLowPageBits(uint32_t Type) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;

private:
  // Size in bytes of the thread control block reserved at the start of the
  // TLS block on AArch64.
  static const uint64_t TcbSize = 16;
};

// AMDGPU. Minimal support: relocation application and classification only.
class AMDGPUTargetInfo final : public TargetInfo {
public:
  AMDGPUTargetInfo() {}
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
};

// MIPS is templated over ELFT because it is linked in all four
// endianness/word-size combinations (see createTarget).
template <class ELFT> class MipsTargetInfo final : public TargetInfo {
public:
  MipsTargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZero(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void writeThunk(uint8_t *Buf, uint64_t S) const override;
  bool needsThunk(uint32_t Type, const InputFile &File,
                  const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  bool usesOnlyLowPageBits(uint32_t Type) const override;
};
} // anonymous namespace
182 
183 TargetInfo *createTarget() {
184   switch (Config->EMachine) {
185   case EM_386:
186     return new X86TargetInfo();
187   case EM_AARCH64:
188     return new AArch64TargetInfo();
189   case EM_AMDGPU:
190     return new AMDGPUTargetInfo();
191   case EM_MIPS:
192     switch (Config->EKind) {
193     case ELF32LEKind:
194       return new MipsTargetInfo<ELF32LE>();
195     case ELF32BEKind:
196       return new MipsTargetInfo<ELF32BE>();
197     case ELF64LEKind:
198       return new MipsTargetInfo<ELF64LE>();
199     case ELF64BEKind:
200       return new MipsTargetInfo<ELF64BE>();
201     default:
202       fatal("unsupported MIPS target");
203     }
204   case EM_PPC:
205     return new PPCTargetInfo();
206   case EM_PPC64:
207     return new PPC64TargetInfo();
208   case EM_X86_64:
209     return new X86_64TargetInfo();
210   }
211   fatal("unknown target machine");
212 }
213 
214 TargetInfo::~TargetInfo() {}
215 
216 uint64_t TargetInfo::getImplicitAddend(const uint8_t *Buf,
217                                        uint32_t Type) const {
218   return 0;
219 }
220 
221 uint64_t TargetInfo::getVAStart() const { return Config->Pic ? 0 : VAStart; }
222 
223 bool TargetInfo::usesOnlyLowPageBits(uint32_t Type) const { return false; }
224 
225 bool TargetInfo::needsThunk(uint32_t Type, const InputFile &File,
226                             const SymbolBody &S) const {
227   return false;
228 }
229 
230 bool TargetInfo::isTlsInitialExecRel(uint32_t Type) const { return false; }
231 
232 bool TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const { return false; }
233 
234 bool TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
235   return false;
236 }
237 
238 void TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
239                                 uint64_t Val) const {
240   llvm_unreachable("Should not have claimed to be relaxable");
241 }
242 
243 void TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
244                                 uint64_t Val) const {
245   llvm_unreachable("Should not have claimed to be relaxable");
246 }
247 
248 void TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
249                                 uint64_t Val) const {
250   llvm_unreachable("Should not have claimed to be relaxable");
251 }
252 
253 void TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
254                                 uint64_t Val) const {
255   llvm_unreachable("Should not have claimed to be relaxable");
256 }
257 
X86TargetInfo::X86TargetInfo() {
  // Dynamic relocation types emitted for this target.
  CopyRel = R_386_COPY;
  GotRel = R_386_GLOB_DAT;
  PltRel = R_386_JUMP_SLOT;
  IRelativeRel = R_386_IRELATIVE;
  RelativeRel = R_386_RELATIVE;
  // TLS dynamic relocation types.
  TlsGotRel = R_386_TLS_TPOFF;
  TlsModuleIndexRel = R_386_TLS_DTPMOD32;
  TlsOffsetRel = R_386_TLS_DTPOFF32;
  UseLazyBinding = true;
  // PLT header (PLT[0]) and each PLT entry are 16 bytes (see writePltZero
  // and writePlt below).
  PltEntrySize = 16;
  PltZeroSize = 16;
  // NOTE(review): presumably the number of relocations to skip after a
  // GD->LE relaxation (the rewritten call consumes the following
  // relocation) -- confirm against the relocation scan.
  TlsGdToLeSkip = 2;
}
272 
273 RelExpr X86TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
274   switch (Type) {
275   default:
276     return R_ABS;
277   case R_386_TLS_GD:
278     return R_TLSGD;
279   case R_386_TLS_LDM:
280     return R_TLSLD;
281   case R_386_PLT32:
282     return R_PLT_PC;
283   case R_386_PC32:
284     return R_PC;
285   case R_386_GOTPC:
286     return R_GOTONLY_PC;
287   case R_386_TLS_IE:
288     return R_GOT;
289   case R_386_GOT32:
290   case R_386_TLS_GOTIE:
291     return R_GOT_FROM_END;
292   case R_386_GOTOFF:
293     return R_GOTREL;
294   case R_386_TLS_LE:
295     return R_TLS;
296   case R_386_TLS_LE_32:
297     return R_NEG_TLS;
298   }
299 }
300 
301 void X86TargetInfo::writeGotPltHeader(uint8_t *Buf) const {
302   write32le(Buf, Out<ELF32LE>::Dynamic->getVA());
303 }
304 
305 void X86TargetInfo::writeGotPlt(uint8_t *Buf, uint64_t Plt) const {
306   // Entries in .got.plt initially points back to the corresponding
307   // PLT entries with a fixed offset to skip the first instruction.
308   write32le(Buf, Plt + 6);
309 }
310 
311 uint32_t X86TargetInfo::getDynRel(uint32_t Type) const {
312   if (Type == R_386_TLS_LE)
313     return R_386_TLS_TPOFF;
314   if (Type == R_386_TLS_LE_32)
315     return R_386_TLS_TPOFF32;
316   return Type;
317 }
318 
// Predicates classifying x86-32 TLS relocation types by access model
// (general-dynamic / local-dynamic / initial-exec).
bool X86TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
  return Type == R_386_TLS_GD;
}

bool X86TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const {
  return Type == R_386_TLS_LDO_32 || Type == R_386_TLS_LDM;
}

bool X86TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
  return Type == R_386_TLS_IE || Type == R_386_TLS_GOTIE;
}
330 
// Write the 16-byte PLT header (PLT[0]): push the second .got.plt word and
// jump through the third, which the dynamic loader fills in at startup.
void X86TargetInfo::writePltZero(uint8_t *Buf) const {
  // Executable files and shared object files have
  // separate procedure linkage tables.
  if (Config->Pic) {
    // PIC form: .got.plt is addressed relative to %ebx, so no absolute
    // addresses need to be patched in.
    const uint8_t V[] = {
        0xff, 0xb3, 0x04, 0x00, 0x00, 0x00, // pushl 4(%ebx)
        0xff, 0xa3, 0x08, 0x00, 0x00, 0x00, // jmp   *8(%ebx)
        0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
    };
    memcpy(Buf, V, sizeof(V));
    return;
  }

  // Non-PIC form uses absolute .got.plt addresses, patched below.
  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushl (GOT+4)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp   *(GOT+8)
      0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint32_t Got = Out<ELF32LE>::GotPlt->getVA();
  // Patch the 32-bit operand fields at offsets 2 and 8.
  write32le(Buf + 2, Got + 4);
  write32le(Buf + 8, Got + 8);
}
354 
// Write one 16-byte PLT entry: an indirect jump through the symbol's GOT
// slot, then the lazy-binding push of the relocation offset and a jump back
// to PLT[0].
void X86TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                             uint64_t PltEntryAddr, int32_t Index,
                             unsigned RelOff) const {
  const uint8_t Inst[] = {
      0xff, 0x00, 0x00, 0x00, 0x00, 0x00, // jmp *foo_in_GOT|*foo@GOT(%ebx)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushl $reloc_offset
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmp .PLT0@PC
  };
  memcpy(Buf, Inst, sizeof(Inst));

  // jmp *foo@GOT(%ebx) or jmp *foo_in_GOT
  Buf[1] = Config->Pic ? 0xa3 : 0x25;
  uint32_t Got = UseLazyBinding ? Out<ELF32LE>::GotPlt->getVA()
                                : Out<ELF32LE>::Got->getVA();
  // Shared objects address the slot %ebx-relative (offset from the GOT
  // base); executables use the absolute slot address.
  write32le(Buf + 2, Config->Shared ? GotEntryAddr - Got : GotEntryAddr);
  write32le(Buf + 7, RelOff);
  // PC-relative displacement back to PLT[0]. The final jmp ends at offset 16
  // of this entry, hence the extra -16.
  write32le(Buf + 12, -Index * PltEntrySize - PltZeroSize - 16);
}
373 
374 uint64_t X86TargetInfo::getImplicitAddend(const uint8_t *Buf,
375                                           uint32_t Type) const {
376   switch (Type) {
377   default:
378     return 0;
379   case R_386_32:
380   case R_386_GOT32:
381   case R_386_GOTOFF:
382   case R_386_GOTPC:
383   case R_386_PC32:
384   case R_386_PLT32:
385     return read32le(Buf);
386   }
387 }
388 
// All x86-32 relocations handled here write a 32-bit little-endian word;
// the value is range-checked first.
void X86TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  checkInt<32>(Val, Type);
  write32le(Loc, Val);
}
394 
void X86TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Convert
  //   leal x@tlsgd(, %ebx, 1),
  //   call __tls_get_addr@plt
  // to
  //   movl %gs:0,%eax
  //   subl $x@ntpoff,%eax
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x81, 0xe8, 0x00, 0x00, 0x00, 0x00  // subl 0(%ebx), %eax
  };
  // Loc points at the leal's relocated operand, 3 bytes into the sequence;
  // the replacement covers the leal and the following call (12 bytes).
  memcpy(Loc - 3, Inst, sizeof(Inst));
  // The subl immediate is the variable's (positive) distance from the end of
  // the TLS segment, i.e. p_memsz - Val.
  relocateOne(Loc + 5, R_386_32, Out<ELF32LE>::TlsPhdr->p_memsz - Val);
}
410 
void X86TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Convert
  //   leal x@tlsgd(, %ebx, 1),
  //   call __tls_get_addr@plt
  // to
  //   movl %gs:0, %eax
  //   addl x@gotntpoff(%ebx), %eax
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x03, 0x83, 0x00, 0x00, 0x00, 0x00  // addl 0(%ebx), %eax
  };
  // Loc points 3 bytes into the leal; overwrite leal + call (12 bytes).
  memcpy(Loc - 3, Inst, sizeof(Inst));
  // The addl operand is the GOT slot's offset from the end of .got
  // (VA plus NumEntries * 4), matching the R_GOT_FROM_END convention.
  relocateOne(Loc + 5, R_386_32, Val - Out<ELF32LE>::Got->getVA() -
                                     Out<ELF32LE>::Got->getNumEntries() * 4);
}
427 
// In some conditions, relocations can be optimized to avoid using GOT.
// This function does that for Initial Exec to Local Exec case.
void X86TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Ulrich's document section 6.2 says that @gotntpoff can
  // be used with MOVL or ADDL instructions.
  // @indntpoff is similar to @gotntpoff, but for use in
  // position dependent code.
  uint8_t *Inst = Loc - 2; // opcode byte of the instruction being patched
  uint8_t *Op = Loc - 1;   // ModRM byte
  uint8_t Reg = (Loc[-1] >> 3) & 7; // ModRM reg field
  bool IsMov = *Inst == 0x8b;
  if (Type == R_386_TLS_IE) {
    // For R_386_TLS_IE relocation we perform the next transformations:
    // MOVL foo@INDNTPOFF,%EAX is transformed to MOVL $foo,%EAX
    // MOVL foo@INDNTPOFF,%REG is transformed to MOVL $foo,%REG
    // ADDL foo@INDNTPOFF,%REG is transformed to ADDL $foo,%REG
    // First one is special because when EAX is used the sequence is 5 bytes
    // long, otherwise it is 6 bytes.
    if (*Op == 0xa1) {
      // 0xa1 is the short "movl moffs32, %eax" form; replace with
      // "movl $imm32, %eax" (0xb8).
      *Op = 0xb8;
    } else {
      *Inst = IsMov ? 0xc7 : 0x81;
      // Rewrite ModRM to register-direct addressing (mod = 11).
      *Op = 0xc0 | ((*Op >> 3) & 7);
    }
  } else {
    // R_386_TLS_GOTIE relocation can be optimized to
    // R_386_TLS_LE so that it does not use GOT.
    // "MOVL foo@GOTTPOFF(%RIP), %REG" is transformed to "MOVL $foo, %REG".
    // "ADDL foo@GOTNTPOFF(%RIP), %REG" is transformed to "LEAL foo(%REG), %REG"
    // Note: gold converts to ADDL instead of LEAL.
    *Inst = IsMov ? 0xc7 : 0x8d;
    if (IsMov)
      *Op = 0xc0 | ((*Op >> 3) & 7);
    else
      // leal disp32(%reg), %reg: mod = 10, r/m = reg, reg field = reg.
      *Op = 0x80 | Reg | (Reg << 3);
  }
  // The new immediate/displacement is the negative offset from the end of
  // the TLS segment.
  relocateOne(Loc, R_386_TLS_LE, Val - Out<ELF32LE>::TlsPhdr->p_memsz);
}
467 
void X86TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // LDO_32 only needs its value rewritten relative to the end of the TLS
  // segment; the instruction stays as-is.
  if (Type == R_386_TLS_LDO_32) {
    relocateOne(Loc, R_386_TLS_LE, Val - Out<ELF32LE>::TlsPhdr->p_memsz);
    return;
  }

  // Convert
  //   leal foo(%reg),%eax
  //   call ___tls_get_addr
  // to
  //   movl %gs:0,%eax
  //   nop
  //   leal 0(%esi,1),%esi
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0,%eax
      0x90,                               // nop
      0x8d, 0x74, 0x26, 0x00              // leal 0(%esi,1),%esi
  };
  // Loc points 2 bytes into the leal; the 11-byte replacement (including
  // padding) overwrites the leal and the call.
  memcpy(Loc - 2, Inst, sizeof(Inst));
}
489 
X86_64TargetInfo::X86_64TargetInfo() {
  // Dynamic relocation types emitted for this target.
  CopyRel = R_X86_64_COPY;
  GotRel = R_X86_64_GLOB_DAT;
  PltRel = R_X86_64_JUMP_SLOT;
  RelativeRel = R_X86_64_RELATIVE;
  IRelativeRel = R_X86_64_IRELATIVE;
  // TLS dynamic relocation types.
  TlsGotRel = R_X86_64_TPOFF64;
  TlsModuleIndexRel = R_X86_64_DTPMOD64;
  TlsOffsetRel = R_X86_64_DTPOFF64;
  UseLazyBinding = true;
  // PLT header (PLT[0]) and each PLT entry are 16 bytes (see writePltZero
  // and writePlt below).
  PltEntrySize = 16;
  PltZeroSize = 16;
  // NOTE(review): presumably the number of relocations to skip after a
  // GD->LE relaxation -- confirm against the relocation scan.
  TlsGdToLeSkip = 2;
}
504 
505 RelExpr X86_64TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
506   switch (Type) {
507   default:
508     return R_ABS;
509   case R_X86_64_TPOFF32:
510     return R_TLS;
511   case R_X86_64_TLSLD:
512     return R_TLSLD_PC;
513   case R_X86_64_TLSGD:
514     return R_TLSGD_PC;
515   case R_X86_64_SIZE32:
516   case R_X86_64_SIZE64:
517     return R_SIZE;
518   case R_X86_64_PLT32:
519     return R_PLT_PC;
520   case R_X86_64_PC32:
521   case R_X86_64_PC64:
522     return R_PC;
523   case R_X86_64_GOT32:
524     return R_GOT_FROM_END;
525   case R_X86_64_GOTPCREL:
526   case R_X86_64_GOTPCRELX:
527   case R_X86_64_REX_GOTPCRELX:
528   case R_X86_64_GOTTPOFF:
529     return R_GOT_PC;
530   }
531 }
532 
void X86_64TargetInfo::writeGotPltHeader(uint8_t *Buf) const {
  // The first entry holds the value of _DYNAMIC. It is not clear why that is
  // required, but it is documented in the psabi and the glibc dynamic linker
  // seems to use it (note that this is relevant for linking ld.so, not any
  // other program).
  write64le(Buf, Out<ELF64LE>::Dynamic->getVA());
}

// Initialize a .got.plt slot to point back into its own PLT entry,
// 6 bytes in (past the initial jmp).
void X86_64TargetInfo::writeGotPlt(uint8_t *Buf, uint64_t Plt) const {
  // See comments in X86TargetInfo::writeGotPlt.
  write32le(Buf, Plt + 6);
}
545 
// Write the 16-byte PLT header (PLT[0]): push the second .got.plt word and
// jump through the third, both addressed RIP-relative.
void X86_64TargetInfo::writePltZero(uint8_t *Buf) const {
  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushq GOT+8(%rip)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *GOT+16(%rip)
      0x0f, 0x1f, 0x40, 0x00              // nopl 0x0(rax)
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint64_t Got = Out<ELF64LE>::GotPlt->getVA();
  uint64_t Plt = Out<ELF64LE>::Plt->getVA();
  // RIP-relative displacements: (Got+8) - (Plt+6) and (Got+16) - (Plt+12),
  // since each instruction ends 6 bytes after its own start.
  write32le(Buf + 2, Got - Plt + 2); // GOT+8
  write32le(Buf + 8, Got - Plt + 4); // GOT+16
}
558 
// Write one 16-byte PLT entry: jmp through the symbol's .got.plt slot, then
// the lazy-binding push of the relocation index and jump back to PLT[0].
void X86_64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                uint64_t PltEntryAddr, int32_t Index,
                                unsigned RelOff) const {
  const uint8_t Inst[] = {
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmpq *got(%rip)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushq <relocation index>
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmpq plt[0]
  };
  memcpy(Buf, Inst, sizeof(Inst));

  // RIP-relative displacement to the GOT slot (RIP = entry start + 6).
  write32le(Buf + 2, GotEntryAddr - PltEntryAddr - 6);
  write32le(Buf + 7, Index);
  // Displacement back to PLT[0]; the final jmp ends at offset 16 of this
  // entry, hence the extra -16.
  write32le(Buf + 12, -Index * PltEntrySize - PltZeroSize - 16);
}
573 
574 uint32_t X86_64TargetInfo::getDynRel(uint32_t Type) const {
575   if (Type == R_X86_64_PC32 || Type == R_X86_64_32)
576     if (Config->Shared)
577       error(getELFRelocationTypeName(EM_X86_64, Type) +
578             " cannot be a dynamic relocation");
579   return Type;
580 }
581 
// Predicates classifying x86-64 TLS relocation types by access model
// (initial-exec / general-dynamic / local-dynamic).
bool X86_64TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
  return Type == R_X86_64_GOTTPOFF;
}

bool X86_64TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
  return Type == R_X86_64_TLSGD;
}

bool X86_64TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const {
  return Type == R_X86_64_DTPOFF32 || Type == R_X86_64_DTPOFF64 ||
         Type == R_X86_64_TLSLD;
}
594 
void X86_64TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                      uint64_t Val) const {
  // Convert
  //   .byte 0x66
  //   leaq x@tlsgd(%rip), %rdi
  //   .word 0x6666
  //   rex64
  //   call __tls_get_addr@plt
  // to
  //   mov %fs:0x0,%rax
  //   lea x@tpoff,%rax
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
      0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00              // lea x@tpoff,%rax
  };
  // Loc points 4 bytes into the original 16-byte sequence; the replacement
  // overwrites all of it.
  memcpy(Loc - 4, Inst, sizeof(Inst));
  // @tpoff is the (negative) offset from the end of the TLS segment.
  // NOTE(review): the +4 compensates for the PC-relative bias baked into
  // Val at the original relocation site -- confirm against the scan.
  relocateOne(Loc + 8, R_X86_64_TPOFF32,
              Val + 4 - Out<ELF64LE>::TlsPhdr->p_memsz);
}
614 
void X86_64TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
                                      uint64_t Val) const {
  // Convert
  //   .byte 0x66
  //   leaq x@tlsgd(%rip), %rdi
  //   .word 0x6666
  //   rex64
  //   call __tls_get_addr@plt
  // to
  //   mov %fs:0x0,%rax
  //   addq x@tpoff,%rax
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
      0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00              // addq x@tpoff,%rax
  };
  // Loc points 4 bytes into the original 16-byte sequence; the replacement
  // overwrites all of it.
  memcpy(Loc - 4, Inst, sizeof(Inst));
  // The addq operand stays RIP-relative (points at the GOT slot); the -8
  // rebases the PC-relative value to the new instruction's location.
  relocateOne(Loc + 8, R_X86_64_PC32, Val - 8);
}
633 
// In some conditions, R_X86_64_GOTTPOFF relocation can be optimized to
// R_X86_64_TPOFF32 so that it does not use GOT.
void X86_64TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                      uint64_t Val) const {
  // Ulrich's document section 6.5 says that @gottpoff(%rip) must be
  // used in MOVQ or ADDQ instructions only.
  // "MOVQ foo@GOTTPOFF(%RIP), %REG" is transformed to "MOVQ $foo, %REG".
  // "ADDQ foo@GOTTPOFF(%RIP), %REG" is transformed to "LEAQ foo(%REG), %REG"
  // (if the register is not RSP/R12) or "ADDQ $foo, %RSP".
  // Opcodes info can be found at http://ref.x86asm.net/coder64.html#x48.
  uint8_t *Prefix = Loc - 3;  // REX prefix byte
  uint8_t *Inst = Loc - 2;    // opcode byte
  uint8_t *RegSlot = Loc - 1; // ModRM byte
  uint8_t Reg = Loc[-1] >> 3; // ModRM reg field (mod bits are 00 here)
  bool IsMov = *Inst == 0x8b;
  bool RspAdd = !IsMov && Reg == 4;

  // r12 and rsp registers requires special handling.
  // Problem is that for other registers, for example leaq 0xXXXXXXXX(%r11),%r11
  // result out is 7 bytes: 4d 8d 9b XX XX XX XX,
  // but leaq 0xXXXXXXXX(%r12),%r12 is 8 bytes: 4d 8d a4 24 XX XX XX XX.
  // The same true for rsp. So we convert to addq for them, saving 1 byte that
  // we dont have.
  if (RspAdd)
    *Inst = 0x81;
  else
    *Inst = IsMov ? 0xc7 : 0x8d;
  // A 0x4c REX prefix (REX.WR) must drop the R bit when the register moves
  // from the reg field to the r/m field.
  if (*Prefix == 0x4c)
    *Prefix = (IsMov || RspAdd) ? 0x49 : 0x4d;
  *RegSlot = (IsMov || RspAdd) ? (0xc0 | Reg) : (0x80 | Reg | (Reg << 3));
  // NOTE(review): the +4 compensates for the PC-relative bias baked into
  // Val at the relocation site -- confirm against the scan.
  relocateOne(Loc, R_X86_64_TPOFF32, Val + 4 - Out<ELF64LE>::TlsPhdr->p_memsz);
}
666 
void X86_64TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                      uint64_t Val) const {
  // Convert
  //   leaq bar@tlsld(%rip), %rdi
  //   callq __tls_get_addr@PLT
  //   leaq bar@dtpoff(%rax), %rcx
  // to
  //   .word 0x6666
  //   .byte 0x66
  //   mov %fs:0,%rax
  //   leaq bar@tpoff(%rax), %rcx
  //
  // The DTPOFF relocations only need their value rewritten relative to the
  // end of the TLS segment; their instructions are unchanged.
  if (Type == R_X86_64_DTPOFF64) {
    write64le(Loc, Val - Out<ELF64LE>::TlsPhdr->p_memsz);
    return;
  }
  if (Type == R_X86_64_DTPOFF32) {
    relocateOne(Loc, R_X86_64_TPOFF32, Val - Out<ELF64LE>::TlsPhdr->p_memsz);
    return;
  }

  const uint8_t Inst[] = {
      0x66, 0x66,                                          //.word 0x6666
      0x66,                                                //.byte 0x66
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
  };
  // Loc points 3 bytes into the leaq; the 12-byte replacement overwrites the
  // leaq and the following call.
  memcpy(Loc - 3, Inst, sizeof(Inst));
}
694 
// Apply a single x86-64 relocation at Loc, range-checking where the
// relocation's semantics require it.
void X86_64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  switch (Type) {
  case R_X86_64_32:
    // Zero-extended 32-bit absolute value.
    checkUInt<32>(Val, Type);
    write32le(Loc, Val);
    break;
  case R_X86_64_32S:
  case R_X86_64_TPOFF32:
  case R_X86_64_GOT32:
    // Sign-extended 32-bit values.
    checkInt<32>(Val, Type);
    write32le(Loc, Val);
    break;
  case R_X86_64_64:
  case R_X86_64_DTPOFF64:
  case R_X86_64_SIZE64:
  case R_X86_64_PC64:
    // Full 64-bit values; no range check needed.
    write64le(Loc, Val);
    break;
  case R_X86_64_GOTPCREL:
  case R_X86_64_GOTPCRELX:
  case R_X86_64_REX_GOTPCRELX:
  case R_X86_64_PC32:
  case R_X86_64_GOTTPOFF:
  case R_X86_64_PLT32:
  case R_X86_64_TLSGD:
  case R_X86_64_TLSLD:
  case R_X86_64_DTPOFF32:
  case R_X86_64_SIZE32:
    // NOTE(review): this group is written without a range check, so an
    // out-of-range value would be silently truncated to 32 bits.
    write32le(Loc, Val);
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}
730 
// Relocation masks following the #lo(value), #hi(value), #ha(value),
// #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
// document. The "a" (adjusted) variants add 0x8000 before extracting their
// halfword so that sign extension of a subsequent #lo() part is compensated.
static uint16_t applyPPCLo(uint64_t V) { return V & 0xffff; }
static uint16_t applyPPCHi(uint64_t V) { return (V >> 16) & 0xffff; }
static uint16_t applyPPCHa(uint64_t V) { return applyPPCHi(V + 0x8000); }
static uint16_t applyPPCHigher(uint64_t V) { return (V >> 32) & 0xffff; }
static uint16_t applyPPCHighera(uint64_t V) { return applyPPCHigher(V + 0x8000); }
static uint16_t applyPPCHighest(uint64_t V) { return (V >> 48) & 0xffff; }
static uint16_t applyPPCHighesta(uint64_t V) { return applyPPCHighest(V + 0x8000); }
742 
743 PPCTargetInfo::PPCTargetInfo() {}
744 
745 void PPCTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
746                                 uint64_t Val) const {
747   switch (Type) {
748   case R_PPC_ADDR16_HA:
749     write16be(Loc, applyPPCHa(Val));
750     break;
751   case R_PPC_ADDR16_LO:
752     write16be(Loc, applyPPCLo(Val));
753     break;
754   default:
755     fatal("unrecognized reloc " + Twine(Type));
756   }
757 }
758 
// Every supported PPC32 relocation is computed as an absolute value.
RelExpr PPCTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
  return R_ABS;
}
762 
PPC64TargetInfo::PPC64TargetInfo() {
  // Dynamic relocation types used on PPC64.
  GotRel = R_PPC64_GLOB_DAT;
  RelativeRel = R_PPC64_RELATIVE;
  // Each PLT call stub is eight 4-byte instructions (see writePlt below).
  PltEntrySize = 32;

  // We need 64K pages (at least under glibc/Linux, the loader won't
  // set different permissions on a finer granularity than that).
  PageSize = 65536;

  // The PPC64 ELF ABI v1 spec, says:
  //
  //   It is normally desirable to put segments with different characteristics
  //   in separate 256 Mbyte portions of the address space, to give the
  //   operating system full paging flexibility in the 64-bit address space.
  //
  // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
  // use 0x10000000 as the starting address.
  VAStart = 0x10000000;
}
782 
// Offset of the TOC base pointer from the start of the TOC (.got); see
// getPPC64TocBase.
static uint64_t PPC64TocOffset = 0x8000;
784 
785 uint64_t getPPC64TocBase() {
786   // The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The
787   // TOC starts where the first of these sections starts. We always create a
788   // .got when we see a relocation that uses it, so for us the start is always
789   // the .got.
790   uint64_t TocVA = Out<ELF64BE>::Got->getVA();
791 
792   // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
793   // thus permitting a full 64 Kbytes segment. Note that the glibc startup
794   // code (crt1.o) assumes that you can get from the TOC base to the
795   // start of the .toc section with only a single (signed) 16-bit relocation.
796   return TocVA + PPC64TocOffset;
797 }
798 
799 RelExpr PPC64TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
800   switch (Type) {
801   default:
802     return R_ABS;
803   case R_PPC64_TOC16:
804   case R_PPC64_TOC16_DS:
805   case R_PPC64_TOC16_HA:
806   case R_PPC64_TOC16_HI:
807   case R_PPC64_TOC16_LO:
808   case R_PPC64_TOC16_LO_DS:
809     return R_GOTREL;
810   case R_PPC64_TOC:
811     return R_PPC_TOC;
812   case R_PPC64_REL24:
813     return R_PPC_PLT_OPD;
814   }
815 }
816 
// Write a 32-byte PPC64 ELFv1 PLT call stub: save the caller's TOC pointer,
// load the function descriptor address from the TOC, then load and branch to
// the descriptor's entry point.
void PPC64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                               uint64_t PltEntryAddr, int32_t Index,
                               unsigned RelOff) const {
  uint64_t Off = GotEntryAddr - getPPC64TocBase();

  // FIXME: What we should do, in theory, is get the offset of the function
  // descriptor in the .opd section, and use that as the offset from %r2 (the
  // TOC-base pointer). Instead, we have the GOT-entry offset, and that will
  // be a pointer to the function descriptor in the .opd section. Using
  // this scheme is simpler, but requires an extra indirection per PLT dispatch.

  write32be(Buf,      0xf8410028);                   // std %r2, 40(%r1)
  write32be(Buf + 4,  0x3d620000 | applyPPCHa(Off)); // addis %r11, %r2, X@ha
  write32be(Buf + 8,  0xe98b0000 | applyPPCLo(Off)); // ld %r12, X@l(%r11)
  write32be(Buf + 12, 0xe96c0000);                   // ld %r11,0(%r12)
  write32be(Buf + 16, 0x7d6903a6);                   // mtctr %r11
  write32be(Buf + 20, 0xe84c0008);                   // ld %r2,8(%r12)
  write32be(Buf + 24, 0xe96c0010);                   // ld %r11,16(%r12)
  write32be(Buf + 28, 0x4e800420);                   // bctr
}
837 
// Applies relocation Type at Loc, given the precomputed value Val.
// TOC-relative types are first rebased against the TOC pointer (which
// sits PPC64TocOffset past the TOC base) and then handled exactly like
// the corresponding ADDR16 forms.
void PPC64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                  uint64_t Val) const {
  uint64_t TO = PPC64TocOffset;

  // For a TOC-relative relocation, proceed in terms of the corresponding
  // ADDR16 relocation type.
  switch (Type) {
  case R_PPC64_TOC16:       Type = R_PPC64_ADDR16;       Val -= TO; break;
  case R_PPC64_TOC16_DS:    Type = R_PPC64_ADDR16_DS;    Val -= TO; break;
  case R_PPC64_TOC16_HA:    Type = R_PPC64_ADDR16_HA;    Val -= TO; break;
  case R_PPC64_TOC16_HI:    Type = R_PPC64_ADDR16_HI;    Val -= TO; break;
  case R_PPC64_TOC16_LO:    Type = R_PPC64_ADDR16_LO;    Val -= TO; break;
  case R_PPC64_TOC16_LO_DS: Type = R_PPC64_ADDR16_LO_DS; Val -= TO; break;
  default: break;
  }

  switch (Type) {
  case R_PPC64_ADDR14: {
    checkAlignment<4>(Val, Type);
    // Preserve the AA/LK bits in the branch instruction
    uint8_t AALK = Loc[3];
    write16be(Loc + 2, (AALK & 3) | (Val & 0xfffc));
    break;
  }
  case R_PPC64_ADDR16:
    checkInt<16>(Val, Type);
    write16be(Loc, Val);
    break;
  case R_PPC64_ADDR16_DS:
    // DS-form: the low two bits of the halfword belong to the opcode
    // and must be preserved.
    checkInt<16>(Val, Type);
    write16be(Loc, (read16be(Loc) & 3) | (Val & ~3));
    break;
  // The @ha/@hi/@higher/@highera/@highest/@highesta operators select
  // successive 16-bit slices of the 64-bit value ('a' variants round
  // for a following sign-extended low part).
  case R_PPC64_ADDR16_HA:
    write16be(Loc, applyPPCHa(Val));
    break;
  case R_PPC64_ADDR16_HI:
    write16be(Loc, applyPPCHi(Val));
    break;
  case R_PPC64_ADDR16_HIGHER:
    write16be(Loc, applyPPCHigher(Val));
    break;
  case R_PPC64_ADDR16_HIGHERA:
    write16be(Loc, applyPPCHighera(Val));
    break;
  case R_PPC64_ADDR16_HIGHEST:
    write16be(Loc, applyPPCHighest(Val));
    break;
  case R_PPC64_ADDR16_HIGHESTA:
    write16be(Loc, applyPPCHighesta(Val));
    break;
  case R_PPC64_ADDR16_LO:
    write16be(Loc, applyPPCLo(Val));
    break;
  case R_PPC64_ADDR16_LO_DS:
    // DS-form low part: again keep the two opcode bits.
    write16be(Loc, (read16be(Loc) & 3) | (applyPPCLo(Val) & ~3));
    break;
  case R_PPC64_ADDR32:
    checkInt<32>(Val, Type);
    write32be(Loc, Val);
    break;
  case R_PPC64_ADDR64:
    write64be(Loc, Val);
    break;
  case R_PPC64_REL16_HA:
    write16be(Loc, applyPPCHa(Val));
    break;
  case R_PPC64_REL16_HI:
    write16be(Loc, applyPPCHi(Val));
    break;
  case R_PPC64_REL16_LO:
    write16be(Loc, applyPPCLo(Val));
    break;
  case R_PPC64_REL24: {
    // Branch: patch the 24-bit LI field (bits 6-29); keep opcode and
    // AA/LK bits.
    uint32_t Mask = 0x03FFFFFC;
    checkInt<24>(Val, Type);
    write32be(Loc, (read32be(Loc) & ~Mask) | (Val & Mask));
    break;
  }
  case R_PPC64_REL32:
    checkInt<32>(Val, Type);
    write32be(Loc, Val);
    break;
  case R_PPC64_REL64:
    write64be(Loc, Val);
    break;
  case R_PPC64_TOC:
    write64be(Loc, Val);
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}
930 
931 AArch64TargetInfo::AArch64TargetInfo() {
932   CopyRel = R_AARCH64_COPY;
933   RelativeRel = R_AARCH64_RELATIVE;
934   IRelativeRel = R_AARCH64_IRELATIVE;
935   GotRel = R_AARCH64_GLOB_DAT;
936   PltRel = R_AARCH64_JUMP_SLOT;
937   TlsGotRel = R_AARCH64_TLS_TPREL64;
938   TlsModuleIndexRel = R_AARCH64_TLS_DTPMOD64;
939   TlsOffsetRel = R_AARCH64_TLS_DTPREL64;
940   UseLazyBinding = true;
941   PltEntrySize = 16;
942   PltZeroSize = 32;
943 }
944 
945 RelExpr AArch64TargetInfo::getRelExpr(uint32_t Type,
946                                       const SymbolBody &S) const {
947   switch (Type) {
948   default:
949     return R_ABS;
950   case R_AARCH64_CALL26:
951   case R_AARCH64_CONDBR19:
952   case R_AARCH64_JUMP26:
953   case R_AARCH64_TSTBR14:
954     return R_PLT_PC;
955 
956   case R_AARCH64_PREL16:
957   case R_AARCH64_PREL32:
958   case R_AARCH64_PREL64:
959   case R_AARCH64_ADR_PREL_LO21:
960     return R_PC;
961   case R_AARCH64_ADR_PREL_PG_HI21:
962     return R_PAGE_PC;
963   case R_AARCH64_LD64_GOT_LO12_NC:
964   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
965     return R_GOT;
966   case R_AARCH64_ADR_GOT_PAGE:
967   case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
968     return R_GOT_PAGE_PC;
969   }
970 }
971 
972 bool AArch64TargetInfo::usesOnlyLowPageBits(uint32_t Type) const {
973   switch (Type) {
974   default:
975     return false;
976   case R_AARCH64_ADD_ABS_LO12_NC:
977   case R_AARCH64_LDST8_ABS_LO12_NC:
978   case R_AARCH64_LDST16_ABS_LO12_NC:
979   case R_AARCH64_LDST32_ABS_LO12_NC:
980   case R_AARCH64_LDST64_ABS_LO12_NC:
981   case R_AARCH64_LDST128_ABS_LO12_NC:
982   case R_AARCH64_LD64_GOT_LO12_NC:
983   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
984     return true;
985   }
986 }
987 
988 bool AArch64TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
989   return Type == R_AARCH64_TLSDESC_ADR_PAGE21 ||
990          Type == R_AARCH64_TLSDESC_LD64_LO12_NC ||
991          Type == R_AARCH64_TLSDESC_ADD_LO12_NC ||
992          Type == R_AARCH64_TLSDESC_CALL;
993 }
994 
995 bool AArch64TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
996   return Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
997          Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
998 }
999 
1000 uint32_t AArch64TargetInfo::getDynRel(uint32_t Type) const {
1001   if (Type == R_AARCH64_ABS32 || Type == R_AARCH64_ABS64)
1002     return Type;
1003   StringRef S = getELFRelocationTypeName(EM_AARCH64, Type);
1004   error("relocation " + S + " cannot be used when making a shared object; "
1005                             "recompile with -fPIC.");
1006   // Keep it going with a dummy value so that we can find more reloc errors.
1007   return R_AARCH64_ABS32;
1008 }
1009 
// Initializes a .got.plt slot: every slot initially points at the
// start of the PLT so the first call falls into the lazy resolver.
void AArch64TargetInfo::writeGotPlt(uint8_t *Buf, uint64_t Plt) const {
  write64le(Buf, Out<ELF64LE>::Plt->getVA());
}
1013 
// Returns the 4 KiB page containing Expr, i.e. Expr with its low 12
// bits cleared. This is the value an ADRP instruction materializes.
static uint64_t getAArch64Page(uint64_t Expr) {
  return Expr & ~static_cast<uint64_t>(0xFFF);
}
1017 
// Writes the 32-byte PLT header (PLT[0]): save x16/x30, then load and
// branch to the address stored in .got.plt[2] (GotPlt base + 16; by
// convention that slot is filled by the dynamic loader with the lazy
// resolver address).
void AArch64TargetInfo::writePltZero(uint8_t *Buf) const {
  const uint8_t PltData[] = {
      0xf0, 0x7b, 0xbf, 0xa9, // stp	x16, x30, [sp,#-16]!
      0x10, 0x00, 0x00, 0x90, // adrp	x16, Page(&(.plt.got[2]))
      0x11, 0x02, 0x40, 0xf9, // ldr	x17, [x16, Offset(&(.plt.got[2]))]
      0x10, 0x02, 0x00, 0x91, // add	x16, x16, Offset(&(.plt.got[2]))
      0x20, 0x02, 0x1f, 0xd6, // br	x17
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5  // nop
  };
  memcpy(Buf, PltData, sizeof(PltData));

  // Patch the adrp/ldr/add immediates to reference .got.plt[2].
  uint64_t Got = Out<ELF64LE>::GotPlt->getVA();
  uint64_t Plt = Out<ELF64LE>::Plt->getVA();
  relocateOne(Buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
              getAArch64Page(Got + 16) - getAArch64Page(Plt + 4));
  relocateOne(Buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, Got + 16);
  relocateOne(Buf + 12, R_AARCH64_ADD_ABS_LO12_NC, Got + 16);
}
1038 
// Writes one 16-byte PLT entry: load the target address from the
// entry's .got.plt slot into x17 and branch to it, leaving the slot
// address in x16 for the lazy resolver.
void AArch64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                 uint64_t PltEntryAddr, int32_t Index,
                                 unsigned RelOff) const {
  const uint8_t Inst[] = {
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[n]))
      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.plt.got[n]))]
      0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.plt.got[n]))
      0x20, 0x02, 0x1f, 0xd6  // br   x17
  };
  memcpy(Buf, Inst, sizeof(Inst));

  // Patch the adrp/ldr/add immediates with the .got.plt slot address.
  relocateOne(Buf, R_AARCH64_ADR_PREL_PG_HI21,
              getAArch64Page(GotEntryAddr) - getAArch64Page(PltEntryAddr));
  relocateOne(Buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, GotEntryAddr);
  relocateOne(Buf + 8, R_AARCH64_ADD_ABS_LO12_NC, GotEntryAddr);
}
1055 
1056 static void updateAArch64Addr(uint8_t *L, uint64_t Imm) {
1057   uint32_t ImmLo = (Imm & 0x3) << 29;
1058   uint32_t ImmHi = ((Imm & 0x1FFFFC) >> 2) << 5;
1059   uint64_t Mask = (0x3 << 29) | (0x7FFFF << 5);
1060   write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
1061 }
1062 
// Patches the 12-bit immediate field (insn bits 21:10) of an ADD
// instruction at L; assumes the field is currently zero.
static inline void updateAArch64Add(uint8_t *L, uint64_t Imm) {
  or32le(L, (Imm & 0xFFF) << 10);
}
1066 
// Applies relocation Type at Loc with the precomputed value Val.
// Each case ORs the relevant bits of Val into the instruction's
// immediate field (assuming that field is zero in the input), after
// range/alignment checks where the relocation is not *_NC (no check).
void AArch64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                    uint64_t Val) const {
  switch (Type) {
  case R_AARCH64_ABS16:
    checkIntUInt<16>(Val, Type);
    write16le(Loc, Val);
    break;
  case R_AARCH64_ABS32:
    checkIntUInt<32>(Val, Type);
    write32le(Loc, Val);
    break;
  case R_AARCH64_ABS64:
    write64le(Loc, Val);
    break;
  case R_AARCH64_ADD_ABS_LO12_NC:
    // This relocation stores 12 bits and there's no instruction
    // to do it. Instead, we do a 32 bits store of the value
    // of r_addend bitwise-or'ed Loc. This assumes that the addend
    // bits in Loc are zero.
    or32le(Loc, (Val & 0xFFF) << 10);
    break;
  case R_AARCH64_ADR_GOT_PAGE:
    checkInt<33>(Val, Type);
    updateAArch64Addr(Loc, (Val >> 12) & 0x1FFFFF); // X[32:12]
    break;
  case R_AARCH64_ADR_PREL_LO21:
    checkInt<21>(Val, Type);
    updateAArch64Addr(Loc, Val & 0x1FFFFF);
    break;
  case R_AARCH64_ADR_PREL_PG_HI21:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    checkInt<33>(Val, Type);
    updateAArch64Addr(Loc, (Val >> 12) & 0x1FFFFF); // X[32:12]
    break;
  case R_AARCH64_CALL26:
  case R_AARCH64_JUMP26:
    // 26-bit branch offset, scaled by 4, stored in bits [25:0].
    checkInt<28>(Val, Type);
    or32le(Loc, (Val & 0x0FFFFFFC) >> 2);
    break;
  case R_AARCH64_CONDBR19:
    // 19-bit branch offset, scaled by 4, stored in bits [23:5].
    checkInt<21>(Val, Type);
    or32le(Loc, (Val & 0x1FFFFC) << 3);
    break;
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    checkAlignment<8>(Val, Type);
    or32le(Loc, (Val & 0xFF8) << 7);
    break;
  // The LDST*_ABS_LO12_NC family stores the page offset scaled by the
  // access size (1/2/4/8/16 bytes) into the 12-bit immediate field.
  case R_AARCH64_LDST128_ABS_LO12_NC:
    or32le(Loc, (Val & 0x0FF8) << 6);
    break;
  case R_AARCH64_LDST16_ABS_LO12_NC:
    or32le(Loc, (Val & 0x0FFC) << 9);
    break;
  case R_AARCH64_LDST8_ABS_LO12_NC:
    or32le(Loc, (Val & 0xFFF) << 10);
    break;
  case R_AARCH64_LDST32_ABS_LO12_NC:
    or32le(Loc, (Val & 0xFFC) << 8);
    break;
  case R_AARCH64_LDST64_ABS_LO12_NC:
    or32le(Loc, (Val & 0xFF8) << 7);
    break;
  case R_AARCH64_PREL16:
    checkIntUInt<16>(Val, Type);
    write16le(Loc, Val);
    break;
  case R_AARCH64_PREL32:
    checkIntUInt<32>(Val, Type);
    write32le(Loc, Val);
    break;
  case R_AARCH64_PREL64:
    write64le(Loc, Val);
    break;
  case R_AARCH64_TSTBR14:
    // 14-bit branch offset, scaled by 4, stored in bits [18:5].
    checkInt<16>(Val, Type);
    or32le(Loc, (Val & 0xFFFC) << 3);
    break;
  case R_AARCH64_TLSLE_ADD_TPREL_HI12: {
    // The TP offset is the variable offset plus the (aligned) TCB size.
    uint64_t V = llvm::alignTo(TcbSize, Out<ELF64LE>::TlsPhdr->p_align) + Val;
    checkInt<24>(V, Type);
    updateAArch64Add(Loc, (V & 0xFFF000) >> 12);
    break;
  }
  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: {
    uint64_t V = llvm::alignTo(TcbSize, Out<ELF64LE>::TlsPhdr->p_align) + Val;
    updateAArch64Add(Loc, V & 0xFFF);
    break;
  }
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}
1160 
void AArch64TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                       uint64_t Val) const {
  // TLSDESC Global-Dynamic relocations are in the form:
  //   adrp    x0, :tlsdesc:v              [R_AARCH64_TLSDESC_ADR_PAGE21]
  //   ldr     x1, [x0, #:tlsdesc_lo12:v]  [R_AARCH64_TLSDESC_LD64_LO12_NC]
  //   add     x0, x0, :tlsdesc_lo12:v     [R_AARCH64_TLSDESC_ADD_LO12_NC]
  //   .tlsdesccall                        [R_AARCH64_TLSDESC_CALL]
  // When relaxing to Local-Exec, the sequence is rewritten to load the
  // thread-pointer offset directly:
  //   movz    x0, #high16, lsl #16
  //   movk    x0, #low16
  //   nop
  //   nop
  // The TP offset is the variable's offset plus the (aligned) TCB size.
  uint64_t TPOff = llvm::alignTo(TcbSize, Out<ELF64LE>::TlsPhdr->p_align);
  uint64_t X = Val + TPOff;
  checkUInt<32>(X, Type);

  uint32_t NewInst;
  switch (Type) {
  case R_AARCH64_TLSDESC_ADD_LO12_NC:
  case R_AARCH64_TLSDESC_CALL:
    // nop
    NewInst = 0xd503201f;
    break;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    // movz -- bits [31:16] of the TP offset.
    NewInst = 0xd2a00000 | (((X >> 16) & 0xffff) << 5);
    break;
  case R_AARCH64_TLSDESC_LD64_LO12_NC:
    // movk -- bits [15:0] of the TP offset.
    NewInst = 0xf2800000 | ((X & 0xffff) << 5);
    break;
  default:
    llvm_unreachable("unsupported Relocation for TLS GD to LE relax");
  }
  write32le(Loc, NewInst);
}
1197 
1198 void AArch64TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
1199                                        uint64_t Val) const {
1200   uint64_t TPOff = llvm::alignTo(TcbSize, Out<ELF64LE>::TlsPhdr->p_align);
1201   uint64_t X = Val + TPOff;
1202   checkUInt<32>(X, Type);
1203 
1204   uint32_t Inst = read32le(Loc);
1205   uint32_t NewInst;
1206   if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
1207     // Generate movz.
1208     unsigned RegNo = (Inst & 0x1f);
1209     NewInst = (0xd2a00000 | RegNo) | (((X >> 16) & 0xffff) << 5);
1210   } else if (Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) {
1211     // Generate movk
1212     unsigned RegNo = (Inst & 0x1f);
1213     NewInst = (0xf2800000 | RegNo) | ((X & 0xffff) << 5);
1214   } else {
1215     llvm_unreachable("invalid Relocation for TLS IE to LE Relax");
1216   }
1217   write32le(Loc, NewInst);
1218 }
1219 
// Implementing relocations for AMDGPU is low priority since most
// programs don't use relocations now. relocateOne would be invoked
// once per relocation, but AMDGPU inputs are not expected to contain
// any, so in practice this is never reached. That's why the AMDGPU
// port works without implementing this function.
void AMDGPUTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  llvm_unreachable("not implemented");
}
1228 
// Same situation as AMDGPUTargetInfo::relocateOne: the AMDGPU port
// does not process relocations yet, so this is intentionally
// unimplemented.
RelExpr AMDGPUTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
  llvm_unreachable("not implemented");
}
1232 
1233 template <class ELFT> MipsTargetInfo<ELFT>::MipsTargetInfo() {
1234   GotPltHeaderEntriesNum = 2;
1235   PageSize = 65536;
1236   PltEntrySize = 16;
1237   PltZeroSize = 32;
1238   ThunkSize = 16;
1239   UseLazyBinding = true;
1240   CopyRel = R_MIPS_COPY;
1241   PltRel = R_MIPS_JUMP_SLOT;
1242   if (ELFT::Is64Bits)
1243     RelativeRel = (R_MIPS_64 << 8) | R_MIPS_REL32;
1244   else
1245     RelativeRel = R_MIPS_REL32;
1246 }
1247 
// Classifies how the value of each MIPS relocation is computed.
// Note the deliberate fallthrough from R_MIPS_GOT16 (local symbols
// take the R_MIPS_GOT_LOCAL path; others fall into the GOT cases).
template <class ELFT>
RelExpr MipsTargetInfo<ELFT>::getRelExpr(uint32_t Type,
                                         const SymbolBody &S) const {
  if (ELFT::Is64Bits)
    // See comment in the calculateMips64RelChain.
    Type &= 0xff;
  switch (Type) {
  default:
    return R_ABS;
  case R_MIPS_JALR:
    // Linker-optimization hint only; no bits are patched for it.
    return R_HINT;
  case R_MIPS_GPREL16:
  case R_MIPS_GPREL32:
    return R_GOTREL;
  case R_MIPS_26:
    return R_PLT;
  case R_MIPS_HI16:
  case R_MIPS_LO16:
  case R_MIPS_GOT_OFST:
    // MIPS _gp_disp designates offset between start of function and 'gp'
    // pointer into GOT. __gnu_local_gp is equal to the current value of
    // the 'gp'. Therefore any relocations against them do not require
    // dynamic relocation.
    if (&S == ElfSym<ELFT>::MipsGpDisp)
      return R_PC;
    return R_ABS;
  case R_MIPS_PC32:
  case R_MIPS_PC16:
  case R_MIPS_PC19_S2:
  case R_MIPS_PC21_S2:
  case R_MIPS_PC26_S2:
  case R_MIPS_PCHI16:
  case R_MIPS_PCLO16:
    return R_PC;
  case R_MIPS_GOT16:
    if (S.isLocal())
      return R_MIPS_GOT_LOCAL;
  // fallthrough
  case R_MIPS_CALL16:
  case R_MIPS_GOT_DISP:
    if (!S.isPreemptible())
      return R_MIPS_GOT;
    return R_GOT_OFF;
  case R_MIPS_GOT_PAGE:
    return R_MIPS_GOT_LOCAL;
  }
}
1295 
// Returns the dynamic relocation to emit for an absolute word
// relocation; anything else cannot appear in position-independent
// output and is diagnosed.
template <class ELFT>
uint32_t MipsTargetInfo<ELFT>::getDynRel(uint32_t Type) const {
  if (Type == R_MIPS_32 || Type == R_MIPS_64)
    return RelativeRel;
  StringRef S = getELFRelocationTypeName(EM_MIPS, Type);
  error("relocation " + S + " cannot be used when making a shared object; "
                            "recompile with -fPIC.");
  // Keep it going with a dummy value so that we can find more reloc errors.
  return R_MIPS_32;
}
1306 
// Initializes a .got.plt slot: every slot initially points at the
// start of the PLT so the first call goes through the lazy resolver.
template <class ELFT>
void MipsTargetInfo<ELFT>::writeGotPlt(uint8_t *Buf, uint64_t Plt) const {
  write32<ELFT::TargetEndianness>(Buf, Out<ELFT>::Plt->getVA());
}
1311 
// Returns the %hi(V) half: the high 16 bits of V, biased so that
// adding the sign-extended low half (%lo) reconstructs the value.
static uint16_t mipsHigh(uint64_t V) {
  return static_cast<uint16_t>((V + 0x8000) >> 16);
}
1313 
// Reads the implicit addend of a PC-relative MIPS relocation: extract
// the low BSIZE bits of the instruction, shift left by SHIFT, and
// sign-extend the result.
template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
static int64_t getPcRelocAddend(const uint8_t *Loc) {
  uint32_t Instr = read32<E>(Loc);
  uint32_t Mask = 0xffffffff >> (32 - BSIZE);
  return SignExtend64<BSIZE + SHIFT>((Instr & Mask) << SHIFT);
}
1320 
// Applies a PC-relative MIPS relocation: after range (and, for scaled
// relocations, alignment) checks, store V >> SHIFT into the low BSIZE
// bits of the instruction, preserving the remaining bits.
template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
static void applyMipsPcReloc(uint8_t *Loc, uint32_t Type, uint64_t V) {
  uint32_t Mask = 0xffffffff >> (32 - BSIZE);
  uint32_t Instr = read32<E>(Loc);
  if (SHIFT > 0)
    checkAlignment<(1 << SHIFT)>(V, Type);
  checkInt<BSIZE + SHIFT>(V, Type);
  write32<E>(Loc, (Instr & ~Mask) | ((V >> SHIFT) & Mask));
}
1330 
1331 template <endianness E>
1332 static void writeMipsHi16(uint8_t *Loc, uint64_t V) {
1333   uint32_t Instr = read32<E>(Loc);
1334   write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(V));
1335 }
1336 
1337 template <endianness E>
1338 static void writeMipsLo16(uint8_t *Loc, uint64_t V) {
1339   uint32_t Instr = read32<E>(Loc);
1340   write32<E>(Loc, (Instr & 0xffff0000) | (V & 0xffff));
1341 }
1342 
// Reads the low 16-bit immediate of the instruction at Loc as a
// signed value.
template <endianness E> static int16_t readSignedLo16(const uint8_t *Loc) {
  return SignExtend32<16>(read32<E>(Loc) & 0xffff);
}
1346 
// Writes the 32-byte PLT header. It loads the resolver address from
// .got.plt[0] into $25, leaves the .got.plt base in $28, computes the
// index of the entered PLT entry in $24, and jumps to the resolver.
template <class ELFT>
void MipsTargetInfo<ELFT>::writePltZero(uint8_t *Buf) const {
  const endianness E = ELFT::TargetEndianness;
  write32<E>(Buf, 0x3c1c0000);      // lui   $28, %hi(&GOTPLT[0])
  write32<E>(Buf + 4, 0x8f990000);  // lw    $25, %lo(&GOTPLT[0])($28)
  write32<E>(Buf + 8, 0x279c0000);  // addiu $28, $28, %lo(&GOTPLT[0])
  write32<E>(Buf + 12, 0x031cc023); // subu  $24, $24, $28
  write32<E>(Buf + 16, 0x03e07825); // move  $15, $31
  write32<E>(Buf + 20, 0x0018c082); // srl   $24, $24, 2
  write32<E>(Buf + 24, 0x0320f809); // jalr  $25
  write32<E>(Buf + 28, 0x2718fffe); // addiu $24, $24, -2
  // Patch the %hi/%lo fields with the run-time address of .got.plt.
  uint64_t Got = Out<ELFT>::GotPlt->getVA();
  writeMipsHi16<E>(Buf, Got);
  writeMipsLo16<E>(Buf + 4, Got);
  writeMipsLo16<E>(Buf + 8, Got);
}
1363 
// Writes one 16-byte PLT entry: load the target address from the
// entry's .got.plt slot into $25 and jump to it, materializing the
// full slot address in $24 (via the addiu in the delay slot).
template <class ELFT>
void MipsTargetInfo<ELFT>::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                    uint64_t PltEntryAddr, int32_t Index,
                                    unsigned RelOff) const {
  const endianness E = ELFT::TargetEndianness;
  write32<E>(Buf, 0x3c0f0000);      // lui   $15, %hi(.got.plt entry)
  write32<E>(Buf + 4, 0x8df90000);  // l[wd] $25, %lo(.got.plt entry)($15)
  write32<E>(Buf + 8, 0x03200008);  // jr    $25
  write32<E>(Buf + 12, 0x25f80000); // addiu $24, $15, %lo(.got.plt entry)
  // Patch the %hi/%lo fields with the .got.plt slot address.
  writeMipsHi16<E>(Buf, GotEntryAddr);
  writeMipsLo16<E>(Buf + 4, GotEntryAddr);
  writeMipsLo16<E>(Buf + 12, GotEntryAddr);
}
1377 
1378 template <class ELFT>
1379 void MipsTargetInfo<ELFT>::writeThunk(uint8_t *Buf, uint64_t S) const {
1380   // Write MIPS LA25 thunk code to call PIC function from the non-PIC one.
1381   // See MipsTargetInfo::writeThunk for details.
1382   const endianness E = ELFT::TargetEndianness;
1383   write32<E>(Buf, 0x3c190000);      // lui   $25, %hi(func)
1384   write32<E>(Buf + 4, 0x08000000);  // j     func
1385   write32<E>(Buf + 8, 0x27390000);  // addiu $25, $25, %lo(func)
1386   write32<E>(Buf + 12, 0x00000000); // nop
1387   writeMipsHi16<E>(Buf, S);
1388   write32<E>(Buf + 4, 0x08000000 | (S >> 2));
1389   writeMipsLo16<E>(Buf + 8, S);
1390 }
1391 
// Returns true if a call relocation from File to symbol S requires an
// LA25 thunk (see writeThunk).
template <class ELFT>
bool MipsTargetInfo<ELFT>::needsThunk(uint32_t Type, const InputFile &File,
                                      const SymbolBody &S) const {
  // Any MIPS PIC code function is invoked with its address in register $t9.
  // So if we have a branch instruction from non-PIC code to the PIC one
  // we cannot make the jump directly and need to create a small stubs
  // to save the target function address.
  // See page 3-38 ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
  if (Type != R_MIPS_26)
    return false;
  auto *F = dyn_cast<ELFFileBase<ELFT>>(&File);
  if (!F)
    return false;
  // If current file has PIC code, LA25 stub is not required.
  if (F->getObj().getHeader()->e_flags & EF_MIPS_PIC)
    return false;
  auto *D = dyn_cast<DefinedRegular<ELFT>>(&S);
  if (!D || !D->Section)
    return false;
  // LA25 is required if target file has PIC code
  // or target symbol is a PIC symbol.
  // NOTE(review): STO_MIPS_MIPS16 appears to be used as a mask over the
  // st_other flag bits here, with the masked value STO_MIPS_PIC marking
  // a PIC symbol -- confirm against the MIPS ABI st_other encoding.
  return (D->Section->getFile()->getObj().getHeader()->e_flags & EF_MIPS_PIC) ||
         (D->StOther & STO_MIPS_MIPS16) == STO_MIPS_PIC;
}
1416 
// Reads the addend implicitly encoded in the instruction or word at
// Buf for relocation Type (MIPS uses REL-style relocation sections,
// which carry no explicit addend).
template <class ELFT>
uint64_t MipsTargetInfo<ELFT>::getImplicitAddend(const uint8_t *Buf,
                                                 uint32_t Type) const {
  const endianness E = ELFT::TargetEndianness;
  switch (Type) {
  default:
    return 0;
  case R_MIPS_32:
  case R_MIPS_GPREL32:
    return read32<E>(Buf);
  case R_MIPS_26:
    // FIXME (simon): If the relocation target symbol is not a PLT entry
    // we should use another expression for calculation:
    // ((A << 2) | (P & 0xf0000000)) >> 2
    return SignExtend64<28>((read32<E>(Buf) & 0x3ffffff) << 2);
  case R_MIPS_GPREL16:
  case R_MIPS_LO16:
  case R_MIPS_PCLO16:
  case R_MIPS_TLS_DTPREL_HI16:
  case R_MIPS_TLS_DTPREL_LO16:
  case R_MIPS_TLS_TPREL_HI16:
  case R_MIPS_TLS_TPREL_LO16:
    // These store a sign-extended addend in the instruction's low
    // 16 bits.
    return readSignedLo16<E>(Buf);
  case R_MIPS_PC16:
    return getPcRelocAddend<E, 16, 2>(Buf);
  case R_MIPS_PC19_S2:
    return getPcRelocAddend<E, 19, 2>(Buf);
  case R_MIPS_PC21_S2:
    return getPcRelocAddend<E, 21, 2>(Buf);
  case R_MIPS_PC26_S2:
    return getPcRelocAddend<E, 26, 2>(Buf);
  case R_MIPS_PC32:
    return getPcRelocAddend<E, 32, 0>(Buf);
  }
}
1452 
// Decodes an N64 packed relocation chain into the single effective
// (type, value) pair to apply; unsupported combinations are diagnosed.
static std::pair<uint32_t, uint64_t> calculateMips64RelChain(uint32_t Type,
                                                             uint64_t Val) {
  // MIPS N64 ABI packs multiple relocations into the single relocation
  // record. In general, all up to three relocations can have arbitrary
  // types. In fact, Clang and GCC uses only a few combinations. For now,
  // we support two of them. That is allow to pass at least all LLVM
  // test suite cases.
  // <any relocation> / R_MIPS_SUB / R_MIPS_HI16 | R_MIPS_LO16
  // <any relocation> / R_MIPS_64 / R_MIPS_NONE
  // The first relocation is a 'real' relocation which is calculated
  // using the corresponding symbol's value. The second and the third
  // relocations used to modify result of the first one: extend it to
  // 64-bit, extract high or low part etc. For details, see part 2.9 Relocation
  // at the https://dmz-portal.mips.com/mw/images/8/82/007-4658-001.pdf
  uint32_t Type2 = (Type >> 8) & 0xff;
  uint32_t Type3 = (Type >> 16) & 0xff;
  if (Type2 == R_MIPS_NONE && Type3 == R_MIPS_NONE)
    return std::make_pair(Type, Val);
  if (Type2 == R_MIPS_64 && Type3 == R_MIPS_NONE)
    return std::make_pair(Type2, Val);
  // R_MIPS_SUB negates the value before the HI16/LO16 extraction.
  if (Type2 == R_MIPS_SUB && (Type3 == R_MIPS_HI16 || Type3 == R_MIPS_LO16))
    return std::make_pair(Type3, -Val);
  error("unsupported relocations combination " + Twine(Type));
  return std::make_pair(Type & 0xff, Val);
}
1478 
// Applies relocation Type at Loc with the precomputed value Val,
// after adjusting TLS values and unpacking N64 relocation chains.
// Note the deliberate fallthrough from the range-checked GOT/GPREL
// cases into the common %lo writer.
template <class ELFT>
void MipsTargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint32_t Type,
                                       uint64_t Val) const {
  const endianness E = ELFT::TargetEndianness;
  // Thread pointer and DRP offsets from the start of TLS data area.
  // https://www.linux-mips.org/wiki/NPTL
  if (Type == R_MIPS_TLS_DTPREL_HI16 || Type == R_MIPS_TLS_DTPREL_LO16)
    Val -= 0x8000;
  else if (Type == R_MIPS_TLS_TPREL_HI16 || Type == R_MIPS_TLS_TPREL_LO16)
    Val -= 0x7000;
  if (ELFT::Is64Bits)
    std::tie(Type, Val) = calculateMips64RelChain(Type, Val);
  switch (Type) {
  case R_MIPS_32:
  case R_MIPS_GPREL32:
    write32<E>(Loc, Val);
    break;
  case R_MIPS_64:
    write64<E>(Loc, Val);
    break;
  case R_MIPS_26:
    // Patch the 26-bit jump target field (Val is scaled by 4).
    write32<E>(Loc, (read32<E>(Loc) & ~0x3ffffff) | (Val >> 2));
    break;
  case R_MIPS_GOT_DISP:
  case R_MIPS_GOT_PAGE:
  case R_MIPS_GOT16:
  case R_MIPS_GPREL16:
    checkInt<16>(Val, Type);
  // fallthrough
  case R_MIPS_CALL16:
  case R_MIPS_GOT_OFST:
  case R_MIPS_LO16:
  case R_MIPS_PCLO16:
  case R_MIPS_TLS_DTPREL_LO16:
  case R_MIPS_TLS_TPREL_LO16:
    writeMipsLo16<E>(Loc, Val);
    break;
  case R_MIPS_HI16:
  case R_MIPS_PCHI16:
  case R_MIPS_TLS_DTPREL_HI16:
  case R_MIPS_TLS_TPREL_HI16:
    writeMipsHi16<E>(Loc, Val);
    break;
  case R_MIPS_JALR:
    // Ignore this optimization relocation for now
    break;
  case R_MIPS_PC16:
    applyMipsPcReloc<E, 16, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC19_S2:
    applyMipsPcReloc<E, 19, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC21_S2:
    applyMipsPcReloc<E, 21, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC26_S2:
    applyMipsPcReloc<E, 26, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC32:
    applyMipsPcReloc<E, 32, 0>(Loc, Type, Val);
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}
1544 
1545 template <class ELFT>
1546 bool MipsTargetInfo<ELFT>::usesOnlyLowPageBits(uint32_t Type) const {
1547   return Type == R_MIPS_LO16 || Type == R_MIPS_GOT_OFST;
1548 }
1549 }
1550 }
1551