xref: /llvm-project-15.0.7/lld/ELF/Target.cpp (revision 3bdbb10a)
1 //===- Target.cpp ---------------------------------------------------------===//
2 //
3 //                             The LLVM Linker
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Machine-specific things, such as applying relocations, creation of
11 // GOT or PLT entries, etc., are handled in this file.
12 //
// Refer to the ELF spec for the single letter variables, S, A or P, used
14 // in this file.
15 //
// Some functions defined in this file have "relaxTls" as part of their names.
17 // They do peephole optimization for TLS variables by rewriting instructions.
18 // They are not part of the ABI but optional optimization, so you can skip
19 // them if you are not interested in how TLS variables are optimized.
20 // See the following paper for the details.
21 //
22 //   Ulrich Drepper, ELF Handling For Thread-Local Storage
23 //   http://www.akkadia.org/drepper/tls.pdf
24 //
25 //===----------------------------------------------------------------------===//
26 
27 #include "Target.h"
28 #include "Error.h"
29 #include "InputFiles.h"
30 #include "OutputSections.h"
31 #include "Symbols.h"
32 
33 #include "llvm/ADT/ArrayRef.h"
34 #include "llvm/Object/ELF.h"
35 #include "llvm/Support/Endian.h"
36 #include "llvm/Support/ELF.h"
37 
38 using namespace llvm;
39 using namespace llvm::object;
40 using namespace llvm::support::endian;
41 using namespace llvm::ELF;
42 
43 namespace lld {
44 namespace elf {
45 
// Singleton describing the active target; presumably assigned the result of
// createTarget() by the driver — confirm against callers.
TargetInfo *Target;
47 
48 static void or32le(uint8_t *P, int32_t V) { write32le(P, read32le(P) | V); }
49 
50 template <unsigned N> static void checkInt(int64_t V, uint32_t Type) {
51   if (isInt<N>(V))
52     return;
53   StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
54   error("relocation " + S + " out of range");
55 }
56 
57 template <unsigned N> static void checkUInt(uint64_t V, uint32_t Type) {
58   if (isUInt<N>(V))
59     return;
60   StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
61   error("relocation " + S + " out of range");
62 }
63 
64 template <unsigned N> static void checkIntUInt(uint64_t V, uint32_t Type) {
65   if (isInt<N>(V) || isUInt<N>(V))
66     return;
67   StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
68   error("relocation " + S + " out of range");
69 }
70 
71 template <unsigned N> static void checkAlignment(uint64_t V, uint32_t Type) {
72   if ((V & (N - 1)) == 0)
73     return;
74   StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
75   error("improper alignment for relocation " + S);
76 }
77 
namespace {
// x86-32 (i386) target description.
class X86TargetInfo final : public TargetInfo {
public:
  X86TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
  void writeGotPltHeader(uint8_t *Buf) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZero(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;

  // TLS access-model relaxations (see Drepper's TLS paper).
  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

// x86-64 target description.
class X86_64TargetInfo final : public TargetInfo {
public:
  X86_64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPltHeader(uint8_t *Buf) const override;
  void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZero(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;

  // GOT-indirection relaxation (GOTPCRELX) and TLS relaxations.
  bool canRelaxGot(uint32_t Type, const uint8_t *Data) const override;
  void relaxGot(uint8_t *Loc, uint64_t Val) const override;
  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

// 32-bit PowerPC target description.
class PPCTargetInfo final : public TargetInfo {
public:
  PPCTargetInfo();
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
};

// 64-bit PowerPC target description (ELFv1 function descriptors;
// see the FIXME in writePlt).
class PPC64TargetInfo final : public TargetInfo {
public:
  PPC64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

// AArch64 target description.
class AArch64TargetInfo final : public TargetInfo {
public:
  AArch64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZero(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  bool usesOnlyLowPageBits(uint32_t Type) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

// AMDGPU target description.
class AMDGPUTargetInfo final : public TargetInfo {
public:
  AMDGPUTargetInfo() {}
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
};

// MIPS target description, templated over the four ELF flavors
// (32/64-bit x little/big-endian).
template <class ELFT> class MipsTargetInfo final : public TargetInfo {
public:
  MipsTargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZero(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void writeThunk(uint8_t *Buf, uint64_t S) const override;
  bool needsThunk(uint32_t Type, const InputFile &File,
                  const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  bool usesOnlyLowPageBits(uint32_t Type) const override;
};
} // anonymous namespace
181 
182 TargetInfo *createTarget() {
183   switch (Config->EMachine) {
184   case EM_386:
185     return new X86TargetInfo();
186   case EM_AARCH64:
187     return new AArch64TargetInfo();
188   case EM_AMDGPU:
189     return new AMDGPUTargetInfo();
190   case EM_MIPS:
191     switch (Config->EKind) {
192     case ELF32LEKind:
193       return new MipsTargetInfo<ELF32LE>();
194     case ELF32BEKind:
195       return new MipsTargetInfo<ELF32BE>();
196     case ELF64LEKind:
197       return new MipsTargetInfo<ELF64LE>();
198     case ELF64BEKind:
199       return new MipsTargetInfo<ELF64BE>();
200     default:
201       fatal("unsupported MIPS target");
202     }
203   case EM_PPC:
204     return new PPCTargetInfo();
205   case EM_PPC64:
206     return new PPC64TargetInfo();
207   case EM_X86_64:
208     return new X86_64TargetInfo();
209   }
210   fatal("unknown target machine");
211 }
212 
TargetInfo::~TargetInfo() {}

// By default there is no implicit (in-place) addend; targets that store
// addends in the section contents (e.g. x86, MIPS) override this.
uint64_t TargetInfo::getImplicitAddend(const uint8_t *Buf,
                                       uint32_t Type) const {
  return 0;
}

// Start of the virtual address space: 0 for position-independent output,
// otherwise the target's configured image base.
uint64_t TargetInfo::getVAStart() const { return Config->Pic ? 0 : VAStart; }

// Whether a relocation uses only the low (in-page) bits of the value;
// false for every target that does not override it.
bool TargetInfo::usesOnlyLowPageBits(uint32_t Type) const { return false; }

// Whether a relocation against S needs a thunk; only MIPS overrides
// this in this file.
bool TargetInfo::needsThunk(uint32_t Type, const InputFile &File,
                            const SymbolBody &S) const {
  return false;
}

// TLS access-model classification hooks. Targets that support the
// corresponding relaxations override these; the defaults claim nothing.
bool TargetInfo::isTlsInitialExecRel(uint32_t Type) const { return false; }

bool TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const { return false; }

bool TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
  return false;
}

// The default target never claims a GOT load is relaxable, so the
// relax* hooks below must be unreachable unless overridden in a pair.
bool TargetInfo::canRelaxGot(uint32_t Type, const uint8_t *Data) const {
  return false;
}

void TargetInfo::relaxGot(uint8_t *Loc, uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}

void TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}

void TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}

void TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}

void TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}
264 
X86TargetInfo::X86TargetInfo() {
  // Dynamic relocation types for the i386 ABI.
  CopyRel = R_386_COPY;
  GotRel = R_386_GLOB_DAT;
  PltRel = R_386_JUMP_SLOT;
  IRelativeRel = R_386_IRELATIVE;
  RelativeRel = R_386_RELATIVE;
  TlsGotRel = R_386_TLS_TPOFF;
  TlsModuleIndexRel = R_386_TLS_DTPMOD32;
  TlsOffsetRel = R_386_TLS_DTPOFF32;
  // PLT header and each PLT entry are 16 bytes (see writePltZero/writePlt).
  PltEntrySize = 16;
  PltZeroSize = 16;
  // NOTE(review): presumably the number of relocations skipped after a
  // GD->LE relaxation — confirm against the caller of this field.
  TlsGdToLeSkip = 2;
}
278 
279 RelExpr X86TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
280   switch (Type) {
281   default:
282     return R_ABS;
283   case R_386_TLS_GD:
284     return R_TLSGD;
285   case R_386_TLS_LDM:
286     return R_TLSLD;
287   case R_386_PLT32:
288     return R_PLT_PC;
289   case R_386_PC32:
290     return R_PC;
291   case R_386_GOTPC:
292     return R_GOTONLY_PC;
293   case R_386_TLS_IE:
294     return R_GOT;
295   case R_386_GOT32:
296   case R_386_TLS_GOTIE:
297     return R_GOT_FROM_END;
298   case R_386_GOTOFF:
299     return R_GOTREL;
300   case R_386_TLS_LE:
301     return R_TLS;
302   case R_386_TLS_LE_32:
303     return R_NEG_TLS;
304   }
305 }
306 
// The first .got.plt word holds the address of _DYNAMIC for the benefit
// of the dynamic loader.
void X86TargetInfo::writeGotPltHeader(uint8_t *Buf) const {
  write32le(Buf, Out<ELF32LE>::Dynamic->getVA());
}

void X86TargetInfo::writeGotPlt(uint8_t *Buf, uint64_t Plt) const {
  // Entries in .got.plt initially point back to the corresponding
  // PLT entries with a fixed offset to skip the first instruction.
  write32le(Buf, Plt + 6);
}
316 
317 uint32_t X86TargetInfo::getDynRel(uint32_t Type) const {
318   if (Type == R_386_TLS_LE)
319     return R_386_TLS_TPOFF;
320   if (Type == R_386_TLS_LE_32)
321     return R_386_TLS_TPOFF32;
322   return Type;
323 }
324 
// General-dynamic TLS: GOT holds a {module, offset} pair.
bool X86TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
  return Type == R_386_TLS_GD;
}

// Local-dynamic TLS: module lookup plus a local offset.
bool X86TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const {
  return Type == R_386_TLS_LDO_32 || Type == R_386_TLS_LDM;
}

// Initial-exec TLS: TP offset loaded from the GOT.
bool X86TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
  return Type == R_386_TLS_IE || Type == R_386_TLS_GOTIE;
}
336 
// Write the PLT header (PLT[0]), which pushes the link-map slot and jumps
// to the resolver via .got.plt entries 1 and 2.
void X86TargetInfo::writePltZero(uint8_t *Buf) const {
  // Executable files and shared object files have
  // separate procedure linkage tables.
  if (Config->Pic) {
    // PIC form addresses .got.plt relative to %ebx.
    const uint8_t V[] = {
        0xff, 0xb3, 0x04, 0x00, 0x00, 0x00, // pushl 4(%ebx)
        0xff, 0xa3, 0x08, 0x00, 0x00, 0x00, // jmp   *8(%ebx)
        0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
    };
    memcpy(Buf, V, sizeof(V));
    return;
  }

  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushl (GOT+4)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp   *(GOT+8)
      0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
  };
  memcpy(Buf, PltData, sizeof(PltData));
  // Patch the absolute operands of the pushl/jmp above.
  uint32_t Got = Out<ELF32LE>::GotPlt->getVA();
  write32le(Buf + 2, Got + 4);
  write32le(Buf + 8, Got + 8);
}
360 
// Write one 16-byte PLT entry: indirect jump through the symbol's
// .got.plt slot, then (on the first call) push the reloc offset and fall
// back to PLT[0] for lazy resolution.
void X86TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                             uint64_t PltEntryAddr, int32_t Index,
                             unsigned RelOff) const {
  const uint8_t Inst[] = {
      0xff, 0x00, 0x00, 0x00, 0x00, 0x00, // jmp *foo_in_GOT|*foo@GOT(%ebx)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushl $reloc_offset
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmp .PLT0@PC
  };
  memcpy(Buf, Inst, sizeof(Inst));

  // jmp *foo@GOT(%ebx) or jmp *foo_in_GOT
  Buf[1] = Config->Pic ? 0xa3 : 0x25;
  uint32_t Got = Out<ELF32LE>::GotPlt->getVA();
  write32le(Buf + 2, Config->Shared ? GotEntryAddr - Got : GotEntryAddr);
  write32le(Buf + 7, RelOff);
  // Backward displacement from the end of this entry to PLT[0].
  write32le(Buf + 12, -Index * PltEntrySize - PltZeroSize - 16);
}
378 
379 uint64_t X86TargetInfo::getImplicitAddend(const uint8_t *Buf,
380                                           uint32_t Type) const {
381   switch (Type) {
382   default:
383     return 0;
384   case R_386_32:
385   case R_386_GOT32:
386   case R_386_GOTOFF:
387   case R_386_GOTPC:
388   case R_386_PC32:
389   case R_386_PLT32:
390     return read32le(Buf);
391   }
392 }
393 
// Every supported i386 relocation is a 32-bit in-place write;
// range-check and store little-endian.
void X86TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  checkInt<32>(Val, Type);
  write32le(Loc, Val);
}
399 
void X86TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Convert
  //   leal x@tlsgd(, %ebx, 1),
  //   call __tls_get_addr@plt
  // to
  //   movl %gs:0,%eax
  //   subl $x@ntpoff,%eax
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x81, 0xe8, 0x00, 0x00, 0x00, 0x00  // subl 0(%ebx), %eax
  };
  // Loc points at the relocated operand; the rewritten sequence starts
  // 3 bytes earlier, and its own immediate sits at Loc + 5.
  memcpy(Loc - 3, Inst, sizeof(Inst));
  relocateOne(Loc + 5, R_386_32, Val);
}
415 
void X86TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Convert
  //   leal x@tlsgd(, %ebx, 1),
  //   call __tls_get_addr@plt
  // to
  //   movl %gs:0, %eax
  //   addl x@gotntpoff(%ebx), %eax
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x03, 0x83, 0x00, 0x00, 0x00, 0x00  // addl 0(%ebx), %eax
  };
  // As in relaxTlsGdToLe: rewrite starts 3 bytes before Loc, and the
  // addl displacement lands at Loc + 5.
  memcpy(Loc - 3, Inst, sizeof(Inst));
  relocateOne(Loc + 5, R_386_32, Val);
}
431 
// In some conditions, relocations can be optimized to avoid using GOT.
// This function does that for Initial Exec to Local Exec case.
void X86TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Ulrich's document section 6.2 says that @gotntpoff can
  // be used with MOVL or ADDL instructions.
  // @indntpoff is similar to @gotntpoff, but for use in
  // position dependent code.
  uint8_t *Inst = Loc - 2;    // opcode byte
  uint8_t *Op = Loc - 1;      // ModRM byte (or 0xa1 short form)
  uint8_t Reg = (Loc[-1] >> 3) & 7; // destination register field
  bool IsMov = *Inst == 0x8b;
  if (Type == R_386_TLS_IE) {
    // For R_386_TLS_IE relocation we perform the next transformations:
    // MOVL foo@INDNTPOFF,%EAX is transformed to MOVL $foo,%EAX
    // MOVL foo@INDNTPOFF,%REG is transformed to MOVL $foo,%REG
    // ADDL foo@INDNTPOFF,%REG is transformed to ADDL $foo,%REG
    // First one is special because when EAX is used the sequence is 5 bytes
    // long, otherwise it is 6 bytes.
    if (*Op == 0xa1) {
      *Op = 0xb8;
    } else {
      *Inst = IsMov ? 0xc7 : 0x81;
      *Op = 0xc0 | ((*Op >> 3) & 7);
    }
  } else {
    // R_386_TLS_GOTIE relocation can be optimized to
    // R_386_TLS_LE so that it does not use GOT.
    // "MOVL foo@GOTTPOFF(%RIP), %REG" is transformed to "MOVL $foo, %REG".
    // "ADDL foo@GOTNTPOFF(%RIP), %REG" is transformed to "LEAL foo(%REG), %REG"
    // Note: gold converts to ADDL instead of LEAL.
    *Inst = IsMov ? 0xc7 : 0x8d;
    if (IsMov)
      *Op = 0xc0 | ((*Op >> 3) & 7);
    else
      *Op = 0x80 | Reg | (Reg << 3);
  }
  // Patch the (now immediate/displacement) operand with the TP offset.
  relocateOne(Loc, R_386_TLS_LE, Val);
}
471 
void X86TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // The LD-model offset relocation just becomes a TP-relative value.
  if (Type == R_386_TLS_LDO_32) {
    relocateOne(Loc, R_386_TLS_LE, Val);
    return;
  }

  // Convert
  //   leal foo(%reg),%eax
  //   call ___tls_get_addr
  // to
  //   movl %gs:0,%eax
  //   nop
  //   leal 0(%esi,1),%esi
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0,%eax
      0x90,                               // nop
      0x8d, 0x74, 0x26, 0x00              // leal 0(%esi,1),%esi
  };
  // The rewritten sequence starts 2 bytes before Loc.
  memcpy(Loc - 2, Inst, sizeof(Inst));
}
493 
X86_64TargetInfo::X86_64TargetInfo() {
  // Dynamic relocation types for the x86-64 ABI.
  CopyRel = R_X86_64_COPY;
  GotRel = R_X86_64_GLOB_DAT;
  PltRel = R_X86_64_JUMP_SLOT;
  RelativeRel = R_X86_64_RELATIVE;
  IRelativeRel = R_X86_64_IRELATIVE;
  TlsGotRel = R_X86_64_TPOFF64;
  TlsModuleIndexRel = R_X86_64_DTPMOD64;
  TlsOffsetRel = R_X86_64_DTPOFF64;
  // PLT header and each PLT entry are 16 bytes (see writePltZero/writePlt).
  PltEntrySize = 16;
  PltZeroSize = 16;
  // NOTE(review): presumably the number of relocations skipped after a
  // GD->LE relaxation — confirm against the caller of this field.
  TlsGdToLeSkip = 2;
}
507 
508 RelExpr X86_64TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
509   switch (Type) {
510   default:
511     return R_ABS;
512   case R_X86_64_TPOFF32:
513     return R_TLS;
514   case R_X86_64_TLSLD:
515     return R_TLSLD_PC;
516   case R_X86_64_TLSGD:
517     return R_TLSGD_PC;
518   case R_X86_64_SIZE32:
519   case R_X86_64_SIZE64:
520     return R_SIZE;
521   case R_X86_64_PLT32:
522     return R_PLT_PC;
523   case R_X86_64_PC32:
524   case R_X86_64_PC64:
525     return R_PC;
526   case R_X86_64_GOT32:
527     return R_GOT_FROM_END;
528   case R_X86_64_GOTPCREL:
529   case R_X86_64_GOTPCRELX:
530   case R_X86_64_REX_GOTPCRELX:
531   case R_X86_64_GOTTPOFF:
532     return R_GOT_PC;
533   }
534 }
535 
void X86_64TargetInfo::writeGotPltHeader(uint8_t *Buf) const {
  // The first entry holds the value of _DYNAMIC. It is not clear why that is
  // required, but it is documented in the psabi and the glibc dynamic linker
  // seems to use it (note that this is relevant for linking ld.so, not any
  // other program).
  write64le(Buf, Out<ELF64LE>::Dynamic->getVA());
}

void X86_64TargetInfo::writeGotPlt(uint8_t *Buf, uint64_t Plt) const {
  // See comments in X86TargetInfo::writeGotPlt.
  write32le(Buf, Plt + 6);
}
548 
// Write the PLT header: push the link-map word and jump to the resolver,
// both via rip-relative references into .got.plt.
void X86_64TargetInfo::writePltZero(uint8_t *Buf) const {
  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushq GOT+8(%rip)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *GOT+16(%rip)
      0x0f, 0x1f, 0x40, 0x00              // nopl 0x0(rax)
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint64_t Got = Out<ELF64LE>::GotPlt->getVA();
  uint64_t Plt = Out<ELF64LE>::Plt->getVA();
  // Displacements are measured from the end of each 6-byte instruction:
  // (Got+8)-(Plt+6) and (Got+16)-(Plt+12).
  write32le(Buf + 2, Got - Plt + 2); // GOT+8
  write32le(Buf + 8, Got - Plt + 4); // GOT+16
}
561 
// Write one 16-byte PLT entry: rip-relative indirect jump through the
// symbol's .got.plt slot, then the lazy-binding push/jmp to PLT[0].
void X86_64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                uint64_t PltEntryAddr, int32_t Index,
                                unsigned RelOff) const {
  const uint8_t Inst[] = {
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmpq *got(%rip)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushq <relocation index>
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmpq plt[0]
  };
  memcpy(Buf, Inst, sizeof(Inst));

  // rip-relative displacement from the end of the first instruction.
  write32le(Buf + 2, GotEntryAddr - PltEntryAddr - 6);
  write32le(Buf + 7, Index);
  // Backward displacement from the end of this entry to PLT[0].
  write32le(Buf + 12, -Index * PltEntrySize - PltZeroSize - 16);
}
576 
577 uint32_t X86_64TargetInfo::getDynRel(uint32_t Type) const {
578   if (Type == R_X86_64_PC32 || Type == R_X86_64_32)
579     if (Config->Shared)
580       error(getELFRelocationTypeName(EM_X86_64, Type) +
581             " cannot be a dynamic relocation");
582   return Type;
583 }
584 
// Initial-exec TLS: TP offset loaded from the GOT.
bool X86_64TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
  return Type == R_X86_64_GOTTPOFF;
}

// General-dynamic TLS: GOT holds a {module, offset} pair.
bool X86_64TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
  return Type == R_X86_64_TLSGD;
}

// Local-dynamic TLS: module lookup plus local DTP offsets.
bool X86_64TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const {
  return Type == R_X86_64_DTPOFF32 || Type == R_X86_64_DTPOFF64 ||
         Type == R_X86_64_TLSLD;
}
597 
void X86_64TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                      uint64_t Val) const {
  // Convert
  //   .byte 0x66
  //   leaq x@tlsgd(%rip), %rdi
  //   .word 0x6666
  //   rex64
  //   call __tls_get_addr@plt
  // to
  //   mov %fs:0x0,%rax
  //   leaq x@tpoff(%rax),%rax
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
      0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00              // lea x@tpoff,%rax
  };
  memcpy(Loc - 4, Inst, sizeof(Inst));
  // The original code used a pc relative relocation and so we have to
  // compensate for the -4 it had in the addend.
  relocateOne(Loc + 8, R_X86_64_TPOFF32, Val + 4);
}
618 
void X86_64TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
                                      uint64_t Val) const {
  // Convert
  //   .byte 0x66
  //   leaq x@tlsgd(%rip), %rdi
  //   .word 0x6666
  //   rex64
  //   call __tls_get_addr@plt
  // to
  //   mov %fs:0x0,%rax
  //   addq x@gottpoff(%rip),%rax
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
      0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00 // addq x@gottpoff(%rip),%rax
  };
  memcpy(Loc - 4, Inst, sizeof(Inst));
  // Both code sequences are PC relatives, but since we are moving the constant
  // forward by 8 bytes we have to subtract the value by 8.
  relocateOne(Loc + 8, R_X86_64_PC32, Val - 8);
}
639 
// In some conditions, R_X86_64_GOTTPOFF relocation can be optimized to
// R_X86_64_TPOFF32 so that it does not use GOT.
void X86_64TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                      uint64_t Val) const {
  // Ulrich's document section 6.5 says that @gottpoff(%rip) must be
  // used in MOVQ or ADDQ instructions only.
  // "MOVQ foo@GOTTPOFF(%RIP), %REG" is transformed to "MOVQ $foo, %REG".
  // "ADDQ foo@GOTTPOFF(%RIP), %REG" is transformed to "LEAQ foo(%REG), %REG"
  // (if the register is not RSP/R12) or "ADDQ $foo, %RSP".
  // Opcodes info can be found at http://ref.x86asm.net/coder64.html#x48.
  uint8_t *Prefix = Loc - 3;  // REX prefix
  uint8_t *Inst = Loc - 2;    // opcode
  uint8_t *RegSlot = Loc - 1; // ModRM
  uint8_t Reg = Loc[-1] >> 3; // destination register field
  bool IsMov = *Inst == 0x8b;
  bool RspAdd = !IsMov && Reg == 4;

  // r12 and rsp registers requires special handling.
  // Problem is that for other registers, for example leaq 0xXXXXXXXX(%r11),%r11
  // result out is 7 bytes: 4d 8d 9b XX XX XX XX,
  // but leaq 0xXXXXXXXX(%r12),%r12 is 8 bytes: 4d 8d a4 24 XX XX XX XX.
  // The same is true for rsp. So we convert to addq for them, saving 1 byte
  // that we don't have.
  if (RspAdd)
    *Inst = 0x81;
  else
    *Inst = IsMov ? 0xc7 : 0x8d;
  if (*Prefix == 0x4c)
    *Prefix = (IsMov || RspAdd) ? 0x49 : 0x4d;
  *RegSlot = (IsMov || RspAdd) ? (0xc0 | Reg) : (0x80 | Reg | (Reg << 3));
  // The original code used a pc relative relocation and so we have to
  // compensate for the -4 it had in the addend.
  relocateOne(Loc, R_X86_64_TPOFF32, Val + 4);
}
674 
void X86_64TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                      uint64_t Val) const {
  // Convert
  //   leaq bar@tlsld(%rip), %rdi
  //   callq __tls_get_addr@PLT
  //   leaq bar@dtpoff(%rax), %rcx
  // to
  //   .word 0x6666
  //   .byte 0x66
  //   mov %fs:0,%rax
  //   leaq bar@tpoff(%rax), %rcx
  // The DTP-offset relocations just become plain in-place values.
  if (Type == R_X86_64_DTPOFF64) {
    write64le(Loc, Val);
    return;
  }
  if (Type == R_X86_64_DTPOFF32) {
    relocateOne(Loc, R_X86_64_TPOFF32, Val);
    return;
  }

  // R_X86_64_TLSLD: overwrite the leaq/callq pair (padded to the same
  // length with 0x66 prefixes) starting 3 bytes before Loc.
  const uint8_t Inst[] = {
      0x66, 0x66,                                          // .word 0x6666
      0x66,                                                // .byte 0x66
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
  };
  memcpy(Loc - 3, Inst, sizeof(Inst));
}
702 
// Apply one x86-64 relocation in place. Fields are either 32-bit
// (zero- or sign-checked) or full 64-bit words.
void X86_64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  switch (Type) {
  case R_X86_64_32:
    // Zero-extended 32-bit field.
    checkUInt<32>(Val, Type);
    write32le(Loc, Val);
    break;
  // Sign-extended 32-bit fields.
  case R_X86_64_32S:
  case R_X86_64_TPOFF32:
  case R_X86_64_GOT32:
  case R_X86_64_GOTPCREL:
  case R_X86_64_GOTPCRELX:
  case R_X86_64_REX_GOTPCRELX:
  case R_X86_64_PC32:
  case R_X86_64_GOTTPOFF:
  case R_X86_64_PLT32:
  case R_X86_64_TLSGD:
  case R_X86_64_TLSLD:
  case R_X86_64_DTPOFF32:
  case R_X86_64_SIZE32:
    checkInt<32>(Val, Type);
    write32le(Loc, Val);
    break;
  // 64-bit fields need no range check.
  case R_X86_64_64:
  case R_X86_64_DTPOFF64:
  case R_X86_64_SIZE64:
  case R_X86_64_PC64:
    write64le(Loc, Val);
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}
736 
737 bool X86_64TargetInfo::canRelaxGot(uint32_t Type, const uint8_t *Data) const {
738   if (Type != R_X86_64_GOTPCRELX && Type != R_X86_64_REX_GOTPCRELX)
739     return false;
740   const uint8_t Op = Data[-2];
741   const uint8_t ModRm = Data[-1];
742   // Relax mov.
743   if (Op == 0x8b)
744     return true;
745   // Relax call and jmp.
746   return Op == 0xff && (ModRm == 0x15 || ModRm == 0x25);
747 }
748 
// Rewrite a GOT-indirect instruction into its direct form; only the
// opcodes accepted by canRelaxGot reach here.
void X86_64TargetInfo::relaxGot(uint8_t *Loc, uint64_t Val) const {
  const uint8_t Op = Loc[-2];
  const uint8_t ModRm = Loc[-1];

  // Convert mov foo@GOTPCREL(%rip), %reg to lea foo(%rip), %reg.
  if (Op == 0x8b) {
    *(Loc - 2) = 0x8d;
    relocateOne(Loc, R_X86_64_PC32, Val);
    return;
  }

  assert(Op == 0xff);
  if (ModRm == 0x15) {
    // ABI says we can convert call *foo@GOTPCREL(%rip) to nop call foo.
    // Instead we convert to addr32 call foo, where addr32 is instruction
    // prefix. That makes result expression to be a single instruction.
    *(Loc - 2) = 0x67; // addr32 prefix
    *(Loc - 1) = 0xe8; // call
  } else {
    assert(ModRm == 0x25);
    // Convert jmp *foo@GOTPCREL(%rip) to jmp foo nop.
    // jmp doesn't return, so it is fine to use nop here, it is just a stub.
    *(Loc - 2) = 0xe9; // jmp
    *(Loc + 3) = 0x90; // nop
    // The displacement now starts one byte earlier, so shift the patch
    // point and adjust the value accordingly.
    Loc -= 1;
    Val += 1;
  }
  relocateOne(Loc, R_X86_64_PC32, Val);
}
778 
// Relocation masks following the #lo(value), #hi(value), #ha(value),
// #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
// document. Each extracts one 16-bit half; the "a" (adjusted) forms add
// 0x8000 first so that a following signed #lo fixup is compensated for.
static uint16_t applyPPCLo(uint64_t V) { return V & 0xffff; }
static uint16_t applyPPCHi(uint64_t V) { return (V >> 16) & 0xffff; }
static uint16_t applyPPCHa(uint64_t V) { return ((V + 0x8000) >> 16) & 0xffff; }
static uint16_t applyPPCHigher(uint64_t V) { return (V >> 32) & 0xffff; }
static uint16_t applyPPCHighera(uint64_t V) {
  return ((V + 0x8000) >> 32) & 0xffff;
}
static uint16_t applyPPCHighest(uint64_t V) { return (V >> 48) & 0xffff; }
static uint16_t applyPPCHighesta(uint64_t V) {
  return ((V + 0x8000) >> 48) & 0xffff;
}
790 
791 PPCTargetInfo::PPCTargetInfo() {}
792 
793 void PPCTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
794                                 uint64_t Val) const {
795   switch (Type) {
796   case R_PPC_ADDR16_HA:
797     write16be(Loc, applyPPCHa(Val));
798     break;
799   case R_PPC_ADDR16_LO:
800     write16be(Loc, applyPPCLo(Val));
801     break;
802   default:
803     fatal("unrecognized reloc " + Twine(Type));
804   }
805 }
806 
807 RelExpr PPCTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
808   return R_ABS;
809 }
810 
PPC64TargetInfo::PPC64TargetInfo() {
  // ELFv1 uses GLOB_DAT for both GOT and PLT slots.
  PltRel = GotRel = R_PPC64_GLOB_DAT;
  RelativeRel = R_PPC64_RELATIVE;
  PltEntrySize = 32;

  // We need 64K pages (at least under glibc/Linux, the loader won't
  // set different permissions on a finer granularity than that).
  PageSize = 65536;

  // The PPC64 ELF ABI v1 spec, says:
  //
  //   It is normally desirable to put segments with different characteristics
  //   in separate 256 Mbyte portions of the address space, to give the
  //   operating system full paging flexibility in the 64-bit address space.
  //
  // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
  // use 0x10000000 as the starting address.
  VAStart = 0x10000000;
}
830 
// The TOC base is biased 0x8000 past the start of the TOC area so signed
// 16-bit offsets can reach a full 64K of it.
static uint64_t PPC64TocOffset = 0x8000;

uint64_t getPPC64TocBase() {
  // The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The
  // TOC starts where the first of these sections starts. We always create a
  // .got when we see a relocation that uses it, so for us the start is always
  // the .got.
  uint64_t TocVA = Out<ELF64BE>::Got->getVA();

  // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
  // thus permitting a full 64 Kbytes segment. Note that the glibc startup
  // code (crt1.o) assumes that you can get from the TOC base to the
  // start of the .toc section with only a single (signed) 16-bit relocation.
  return TocVA + PPC64TocOffset;
}
846 
847 RelExpr PPC64TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
848   switch (Type) {
849   default:
850     return R_ABS;
851   case R_PPC64_TOC16:
852   case R_PPC64_TOC16_DS:
853   case R_PPC64_TOC16_HA:
854   case R_PPC64_TOC16_HI:
855   case R_PPC64_TOC16_LO:
856   case R_PPC64_TOC16_LO_DS:
857     return R_GOTREL;
858   case R_PPC64_TOC:
859     return R_PPC_TOC;
860   case R_PPC64_REL24:
861     return R_PPC_PLT_OPD;
862   }
863 }
864 
// Write one 32-byte PPC64 PLT entry. Off is the TOC-relative offset of the
// GOT entry that holds a pointer to the callee's function descriptor in
// .opd; the stub saves the caller's TOC pointer, loads the descriptor
// address, then loads the target address, new TOC pointer and the third
// descriptor word from it before branching.
void PPC64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                               uint64_t PltEntryAddr, int32_t Index,
                               unsigned RelOff) const {
  uint64_t Off = GotEntryAddr - getPPC64TocBase();

  // FIXME: What we should do, in theory, is get the offset of the function
  // descriptor in the .opd section, and use that as the offset from %r2 (the
  // TOC-base pointer). Instead, we have the GOT-entry offset, and that will
  // be a pointer to the function descriptor in the .opd section. Using
  // this scheme is simpler, but requires an extra indirection per PLT dispatch.

  write32be(Buf,      0xf8410028);                   // std %r2, 40(%r1)
  write32be(Buf + 4,  0x3d620000 | applyPPCHa(Off)); // addis %r11, %r2, X@ha
  write32be(Buf + 8,  0xe98b0000 | applyPPCLo(Off)); // ld %r12, X@l(%r11)
  write32be(Buf + 12, 0xe96c0000);                   // ld %r11,0(%r12)
  write32be(Buf + 16, 0x7d6903a6);                   // mtctr %r11
  write32be(Buf + 20, 0xe84c0008);                   // ld %r2,8(%r12)
  write32be(Buf + 24, 0xe96c0010);                   // ld %r11,16(%r12)
  write32be(Buf + 28, 0x4e800420);                   // bctr
}
885 
886 void PPC64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
887                                   uint64_t Val) const {
888   uint64_t TO = PPC64TocOffset;
889 
890   // For a TOC-relative relocation,  proceed in terms of the corresponding
891   // ADDR16 relocation type.
892   switch (Type) {
893   case R_PPC64_TOC16:       Type = R_PPC64_ADDR16;       Val -= TO; break;
894   case R_PPC64_TOC16_DS:    Type = R_PPC64_ADDR16_DS;    Val -= TO; break;
895   case R_PPC64_TOC16_HA:    Type = R_PPC64_ADDR16_HA;    Val -= TO; break;
896   case R_PPC64_TOC16_HI:    Type = R_PPC64_ADDR16_HI;    Val -= TO; break;
897   case R_PPC64_TOC16_LO:    Type = R_PPC64_ADDR16_LO;    Val -= TO; break;
898   case R_PPC64_TOC16_LO_DS: Type = R_PPC64_ADDR16_LO_DS; Val -= TO; break;
899   default: break;
900   }
901 
902   switch (Type) {
903   case R_PPC64_ADDR14: {
904     checkAlignment<4>(Val, Type);
905     // Preserve the AA/LK bits in the branch instruction
906     uint8_t AALK = Loc[3];
907     write16be(Loc + 2, (AALK & 3) | (Val & 0xfffc));
908     break;
909   }
910   case R_PPC64_ADDR16:
911     checkInt<16>(Val, Type);
912     write16be(Loc, Val);
913     break;
914   case R_PPC64_ADDR16_DS:
915     checkInt<16>(Val, Type);
916     write16be(Loc, (read16be(Loc) & 3) | (Val & ~3));
917     break;
918   case R_PPC64_ADDR16_HA:
919     write16be(Loc, applyPPCHa(Val));
920     break;
921   case R_PPC64_ADDR16_HI:
922     write16be(Loc, applyPPCHi(Val));
923     break;
924   case R_PPC64_ADDR16_HIGHER:
925     write16be(Loc, applyPPCHigher(Val));
926     break;
927   case R_PPC64_ADDR16_HIGHERA:
928     write16be(Loc, applyPPCHighera(Val));
929     break;
930   case R_PPC64_ADDR16_HIGHEST:
931     write16be(Loc, applyPPCHighest(Val));
932     break;
933   case R_PPC64_ADDR16_HIGHESTA:
934     write16be(Loc, applyPPCHighesta(Val));
935     break;
936   case R_PPC64_ADDR16_LO:
937     write16be(Loc, applyPPCLo(Val));
938     break;
939   case R_PPC64_ADDR16_LO_DS:
940     write16be(Loc, (read16be(Loc) & 3) | (applyPPCLo(Val) & ~3));
941     break;
942   case R_PPC64_ADDR32:
943     checkInt<32>(Val, Type);
944     write32be(Loc, Val);
945     break;
946   case R_PPC64_ADDR64:
947     write64be(Loc, Val);
948     break;
949   case R_PPC64_REL16_HA:
950     write16be(Loc, applyPPCHa(Val));
951     break;
952   case R_PPC64_REL16_HI:
953     write16be(Loc, applyPPCHi(Val));
954     break;
955   case R_PPC64_REL16_LO:
956     write16be(Loc, applyPPCLo(Val));
957     break;
958   case R_PPC64_REL24: {
959     uint32_t Mask = 0x03FFFFFC;
960     checkInt<24>(Val, Type);
961     write32be(Loc, (read32be(Loc) & ~Mask) | (Val & Mask));
962     break;
963   }
964   case R_PPC64_REL32:
965     checkInt<32>(Val, Type);
966     write32be(Loc, Val);
967     break;
968   case R_PPC64_REL64:
969     write64be(Loc, Val);
970     break;
971   case R_PPC64_TOC:
972     write64be(Loc, Val);
973     break;
974   default:
975     fatal("unrecognized reloc " + Twine(Type));
976   }
977 }
978 
979 AArch64TargetInfo::AArch64TargetInfo() {
980   CopyRel = R_AARCH64_COPY;
981   RelativeRel = R_AARCH64_RELATIVE;
982   IRelativeRel = R_AARCH64_IRELATIVE;
983   GotRel = R_AARCH64_GLOB_DAT;
984   PltRel = R_AARCH64_JUMP_SLOT;
985   TlsGotRel = R_AARCH64_TLS_TPREL64;
986   TlsModuleIndexRel = R_AARCH64_TLS_DTPMOD64;
987   TlsOffsetRel = R_AARCH64_TLS_DTPREL64;
988   PltEntrySize = 16;
989   PltZeroSize = 32;
990 
991   // It doesn't seem to be documented anywhere, but tls on aarch64 uses variant
992   // 1 of the tls structures and the tcb size is 16.
993   TcbSize = 16;
994 }
995 
996 RelExpr AArch64TargetInfo::getRelExpr(uint32_t Type,
997                                       const SymbolBody &S) const {
998   switch (Type) {
999   default:
1000     return R_ABS;
1001 
1002   case R_AARCH64_TLSLE_ADD_TPREL_HI12:
1003   case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
1004     return R_TLS;
1005 
1006   case R_AARCH64_CALL26:
1007   case R_AARCH64_CONDBR19:
1008   case R_AARCH64_JUMP26:
1009   case R_AARCH64_TSTBR14:
1010     return R_PLT_PC;
1011 
1012   case R_AARCH64_PREL16:
1013   case R_AARCH64_PREL32:
1014   case R_AARCH64_PREL64:
1015   case R_AARCH64_ADR_PREL_LO21:
1016     return R_PC;
1017   case R_AARCH64_ADR_PREL_PG_HI21:
1018     return R_PAGE_PC;
1019   case R_AARCH64_LD64_GOT_LO12_NC:
1020   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
1021     return R_GOT;
1022   case R_AARCH64_ADR_GOT_PAGE:
1023   case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
1024     return R_GOT_PAGE_PC;
1025   }
1026 }
1027 
1028 bool AArch64TargetInfo::usesOnlyLowPageBits(uint32_t Type) const {
1029   switch (Type) {
1030   default:
1031     return false;
1032   case R_AARCH64_ADD_ABS_LO12_NC:
1033   case R_AARCH64_LDST8_ABS_LO12_NC:
1034   case R_AARCH64_LDST16_ABS_LO12_NC:
1035   case R_AARCH64_LDST32_ABS_LO12_NC:
1036   case R_AARCH64_LDST64_ABS_LO12_NC:
1037   case R_AARCH64_LDST128_ABS_LO12_NC:
1038   case R_AARCH64_LD64_GOT_LO12_NC:
1039   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
1040     return true;
1041   }
1042 }
1043 
1044 bool AArch64TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
1045   return Type == R_AARCH64_TLSDESC_ADR_PAGE21 ||
1046          Type == R_AARCH64_TLSDESC_LD64_LO12_NC ||
1047          Type == R_AARCH64_TLSDESC_ADD_LO12_NC ||
1048          Type == R_AARCH64_TLSDESC_CALL;
1049 }
1050 
1051 bool AArch64TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
1052   return Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
1053          Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
1054 }
1055 
1056 uint32_t AArch64TargetInfo::getDynRel(uint32_t Type) const {
1057   if (Type == R_AARCH64_ABS32 || Type == R_AARCH64_ABS64)
1058     return Type;
1059   StringRef S = getELFRelocationTypeName(EM_AARCH64, Type);
1060   error("relocation " + S + " cannot be used when making a shared object; "
1061                             "recompile with -fPIC.");
1062   // Keep it going with a dummy value so that we can find more reloc errors.
1063   return R_AARCH64_ABS32;
1064 }
1065 
1066 void AArch64TargetInfo::writeGotPlt(uint8_t *Buf, uint64_t Plt) const {
1067   write64le(Buf, Out<ELF64LE>::Plt->getVA());
1068 }
1069 
// Return the 4KB page address containing Expr, i.e. Expr with the low 12
// bits cleared.
static uint64_t getAArch64Page(uint64_t Expr) {
  return Expr & ~static_cast<uint64_t>(0xFFF);
}
1073 
// Write the 32-byte PLT header. The stub saves x16/x30, forms the address
// of .got.plt[2] (GotPlt + 16) with an adrp/add pair, loads the word
// stored there into x17 and branches to it.
void AArch64TargetInfo::writePltZero(uint8_t *Buf) const {
  const uint8_t PltData[] = {
      0xf0, 0x7b, 0xbf, 0xa9, // stp	x16, x30, [sp,#-16]!
      0x10, 0x00, 0x00, 0x90, // adrp	x16, Page(&(.plt.got[2]))
      0x11, 0x02, 0x40, 0xf9, // ldr	x17, [x16, Offset(&(.plt.got[2]))]
      0x10, 0x02, 0x00, 0x91, // add	x16, x16, Offset(&(.plt.got[2]))
      0x20, 0x02, 0x1f, 0xd6, // br	x17
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5  // nop
  };
  memcpy(Buf, PltData, sizeof(PltData));

  uint64_t Got = Out<ELF64LE>::GotPlt->getVA();
  uint64_t Plt = Out<ELF64LE>::Plt->getVA();
  // Patch the adrp with the page delta between the adrp instruction itself
  // (at Plt + 4) and .got.plt[2], then fill in the ldr/add low-12 offsets.
  relocateOne(Buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
              getAArch64Page(Got + 16) - getAArch64Page(Plt + 4));
  relocateOne(Buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, Got + 16);
  relocateOne(Buf + 12, R_AARCH64_ADD_ABS_LO12_NC, Got + 16);
}
1094 
// Write one 16-byte PLT entry: form the page address of the entry's
// .got.plt slot, load the target address from it into x17 and branch,
// leaving the slot address in x16.
void AArch64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                 uint64_t PltEntryAddr, int32_t Index,
                                 unsigned RelOff) const {
  const uint8_t Inst[] = {
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[n]))
      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.plt.got[n]))]
      0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.plt.got[n]))
      0x20, 0x02, 0x1f, 0xd6  // br   x17
  };
  memcpy(Buf, Inst, sizeof(Inst));

  // Patch the adrp page delta and the ldr/add low-12 offsets for this
  // entry's GOT slot.
  relocateOne(Buf, R_AARCH64_ADR_PREL_PG_HI21,
              getAArch64Page(GotEntryAddr) - getAArch64Page(PltEntryAddr));
  relocateOne(Buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, GotEntryAddr);
  relocateOne(Buf + 8, R_AARCH64_ADD_ABS_LO12_NC, GotEntryAddr);
}
1111 
// Patch the split 21-bit immediate of an ADR/ADRP-class instruction at L:
// bits [1:0] of Imm go to instruction bits [30:29] (immlo) and bits [20:2]
// go to bits [23:5] (immhi). Other instruction bits are preserved.
static void updateAArch64Addr(uint8_t *L, uint64_t Imm) {
  uint32_t ImmLo = (Imm & 0x3) << 29;
  uint32_t ImmHi = ((Imm & 0x1FFFFC) >> 2) << 5;
  uint64_t Mask = (0x3 << 29) | (0x7FFFF << 5);
  write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
}
1118 
1119 static inline void updateAArch64Add(uint8_t *L, uint64_t Imm) {
1120   or32le(L, (Imm & 0xFFF) << 10);
1121 }
1122 
// Encode the computed value Val into the instruction or data word at Loc
// for relocation Type. Range checks reject values that do not fit the
// field; the *_NC ("no check") relocation types deliberately skip them.
void AArch64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                    uint64_t Val) const {
  switch (Type) {
  case R_AARCH64_ABS16:
  case R_AARCH64_PREL16:
    checkIntUInt<16>(Val, Type);
    write16le(Loc, Val);
    break;
  case R_AARCH64_ABS32:
  case R_AARCH64_PREL32:
    checkIntUInt<32>(Val, Type);
    write32le(Loc, Val);
    break;
  case R_AARCH64_ABS64:
  case R_AARCH64_PREL64:
    write64le(Loc, Val);
    break;
  case R_AARCH64_ADD_ABS_LO12_NC:
    // This relocation stores 12 bits and there's no instruction
    // to do it. Instead, we do a 32 bits store of the value
    // of r_addend bitwise-or'ed Loc. This assumes that the addend
    // bits in Loc are zero.
    or32le(Loc, (Val & 0xFFF) << 10);
    break;
  case R_AARCH64_ADR_GOT_PAGE:
  case R_AARCH64_ADR_PREL_PG_HI21:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    // adrp: bits [32:12] of the page delta go into the split immediate.
    checkInt<33>(Val, Type);
    updateAArch64Addr(Loc, (Val >> 12) & 0x1FFFFF); // X[32:12]
    break;
  case R_AARCH64_ADR_PREL_LO21:
    // adr: the full 21-bit byte offset goes into the split immediate.
    checkInt<21>(Val, Type);
    updateAArch64Addr(Loc, Val & 0x1FFFFF);
    break;
  case R_AARCH64_CALL26:
  case R_AARCH64_JUMP26:
    // imm26 holds a word offset, so the byte range is signed 28 bits.
    checkInt<28>(Val, Type);
    or32le(Loc, (Val & 0x0FFFFFFC) >> 2);
    break;
  case R_AARCH64_CONDBR19:
    // imm19 word offset at instruction bits [23:5].
    checkInt<21>(Val, Type);
    or32le(Loc, (Val & 0x1FFFFC) << 3);
    break;
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    // 64-bit load with a scaled 12-bit offset: the value must be 8-byte
    // aligned since the low three bits cannot be encoded.
    checkAlignment<8>(Val, Type);
    or32le(Loc, (Val & 0xFF8) << 7);
    break;
  // The LDST*_ABS_LO12_NC relocations store the low 12 bits scaled by the
  // access size into instruction bits [21:10].
  case R_AARCH64_LDST128_ABS_LO12_NC:
    or32le(Loc, (Val & 0x0FF8) << 6);
    break;
  case R_AARCH64_LDST16_ABS_LO12_NC:
    or32le(Loc, (Val & 0x0FFC) << 9);
    break;
  case R_AARCH64_LDST8_ABS_LO12_NC:
    or32le(Loc, (Val & 0xFFF) << 10);
    break;
  case R_AARCH64_LDST32_ABS_LO12_NC:
    or32le(Loc, (Val & 0xFFC) << 8);
    break;
  case R_AARCH64_LDST64_ABS_LO12_NC:
    or32le(Loc, (Val & 0xFF8) << 7);
    break;
  case R_AARCH64_TSTBR14:
    // imm14 word offset at instruction bits [18:5].
    checkInt<16>(Val, Type);
    or32le(Loc, (Val & 0xFFFC) << 3);
    break;
  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
    // Bits [23:12] of the TP-relative offset.
    checkInt<24>(Val, Type);
    updateAArch64Add(Loc, (Val & 0xFFF000) >> 12);
    break;
  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    // Bits [11:0] of the TP-relative offset.
    updateAArch64Add(Loc, Val & 0xFFF);
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}
1201 
// Rewrite one instruction of a TLSDESC sequence for the global-dynamic ->
// local-exec relaxation. The relaxation turns the descriptor call into a
// direct movz/movk materialization of the TP-relative offset (Val).
void AArch64TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                       uint64_t Val) const {
  // TLSDESC Global-Dynamic relocations are in the form:
  //   adrp    x0, :tlsdesc:v              [R_AARCH64_TLSDESC_ADR_PAGE21]
  //   ldr     x1, [x0, #:tlsdesc_lo12:v]  [R_AARCH64_TLSDESC_LD64_LO12_NC]
  //   add     x0, x0, :tlsdesc_lo12:v     [R_AARCH64_TLSDESC_ADD_LO12_NC]
  //   .tlsdesccall                        [R_AARCH64_TLSDESC_CALL]
  // And it can be optimized to:
  //   movz    x0, #0x0, lsl #16
  //   movk    x0, #0x10
  //   nop
  //   nop
  // The movz/movk pair can only encode a 32-bit offset.
  checkUInt<32>(Val, Type);

  uint32_t NewInst;
  switch (Type) {
  case R_AARCH64_TLSDESC_ADD_LO12_NC:
  case R_AARCH64_TLSDESC_CALL:
    // nop
    NewInst = 0xd503201f;
    break;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    // movz with bits [31:16] of the offset.
    NewInst = 0xd2a00000 | (((Val >> 16) & 0xffff) << 5);
    break;
  case R_AARCH64_TLSDESC_LD64_LO12_NC:
    // movk with bits [15:0] of the offset.
    NewInst = 0xf2800000 | ((Val & 0xffff) << 5);
    break;
  default:
    llvm_unreachable("unsupported Relocation for TLS GD to LE relax");
  }
  write32le(Loc, NewInst);
}
1236 
1237 void AArch64TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
1238                                        uint64_t Val) const {
1239   checkUInt<32>(Val, Type);
1240 
1241   uint32_t Inst = read32le(Loc);
1242   uint32_t NewInst;
1243   if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
1244     // Generate movz.
1245     unsigned RegNo = (Inst & 0x1f);
1246     NewInst = (0xd2a00000 | RegNo) | (((Val >> 16) & 0xffff) << 5);
1247   } else if (Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) {
1248     // Generate movk
1249     unsigned RegNo = (Inst & 0x1f);
1250     NewInst = (0xf2800000 | RegNo) | ((Val & 0xffff) << 5);
1251   } else {
1252     llvm_unreachable("invalid Relocation for TLS IE to LE Relax");
1253   }
1254   write32le(Loc, NewInst);
1255 }
1256 
// Implementing relocations for AMDGPU is low priority since most
// programs don't use relocations now. Thus, this function is not
// actually called (relocateOne is called for each relocation).
// That's why the AMDGPU port works without implementing this function.
void AMDGPUTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  llvm_unreachable("not implemented");
}
1265 
// Unimplemented like AMDGPUTargetInfo::relocateOne: AMDGPU programs don't
// currently use relocations, so this is never reached in practice.
RelExpr AMDGPUTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
  llvm_unreachable("not implemented");
}
1269 
1270 template <class ELFT> MipsTargetInfo<ELFT>::MipsTargetInfo() {
1271   GotPltHeaderEntriesNum = 2;
1272   PageSize = 65536;
1273   PltEntrySize = 16;
1274   PltZeroSize = 32;
1275   ThunkSize = 16;
1276   CopyRel = R_MIPS_COPY;
1277   PltRel = R_MIPS_JUMP_SLOT;
1278   if (ELFT::Is64Bits)
1279     RelativeRel = (R_MIPS_64 << 8) | R_MIPS_REL32;
1280   else
1281     RelativeRel = R_MIPS_REL32;
1282 }
1283 
// Classify how relocation Type against symbol S is computed on MIPS.
template <class ELFT>
RelExpr MipsTargetInfo<ELFT>::getRelExpr(uint32_t Type,
                                         const SymbolBody &S) const {
  if (ELFT::Is64Bits)
    // See comment in the calculateMips64RelChain.
    Type &= 0xff;
  switch (Type) {
  default:
    return R_ABS;
  case R_MIPS_JALR:
    // Optimization hint only; nothing is written for it (see relocateOne).
    return R_HINT;
  case R_MIPS_GPREL16:
  case R_MIPS_GPREL32:
    return R_GOTREL;
  case R_MIPS_26:
    return R_PLT;
  case R_MIPS_HI16:
  case R_MIPS_LO16:
  case R_MIPS_GOT_OFST:
    // MIPS _gp_disp designates offset between start of function and 'gp'
    // pointer into GOT. __gnu_local_gp is equal to the current value of
    // the 'gp'. Therefore any relocations against them do not require
    // dynamic relocation.
    if (&S == ElfSym<ELFT>::MipsGpDisp)
      return R_PC;
    return R_ABS;
  case R_MIPS_PC32:
  case R_MIPS_PC16:
  case R_MIPS_PC19_S2:
  case R_MIPS_PC21_S2:
  case R_MIPS_PC26_S2:
  case R_MIPS_PCHI16:
  case R_MIPS_PCLO16:
    return R_PC;
  case R_MIPS_GOT16:
    // GOT16 against a local symbol addresses a GOT page entry; otherwise
    // it is handled like CALL16/GOT_DISP below.
    if (S.isLocal())
      return R_MIPS_GOT_LOCAL_PAGE;
  // fallthrough
  case R_MIPS_CALL16:
  case R_MIPS_GOT_DISP:
    // Non-preemptible symbols get entries in the local part of the GOT.
    if (!S.isPreemptible())
      return R_MIPS_GOT_LOCAL;
    return R_GOT_OFF;
  case R_MIPS_GOT_PAGE:
    return R_MIPS_GOT_LOCAL_PAGE;
  }
}
1331 
1332 template <class ELFT>
1333 uint32_t MipsTargetInfo<ELFT>::getDynRel(uint32_t Type) const {
1334   if (Type == R_MIPS_32 || Type == R_MIPS_64)
1335     return RelativeRel;
1336   StringRef S = getELFRelocationTypeName(EM_MIPS, Type);
1337   error("relocation " + S + " cannot be used when making a shared object; "
1338                             "recompile with -fPIC.");
1339   // Keep it going with a dummy value so that we can find more reloc errors.
1340   return R_MIPS_32;
1341 }
1342 
1343 template <class ELFT>
1344 void MipsTargetInfo<ELFT>::writeGotPlt(uint8_t *Buf, uint64_t Plt) const {
1345   write32<ELFT::TargetEndianness>(Buf, Out<ELFT>::Plt->getVA());
1346 }
1347 
// Compute %hi(V): the high 16 bits, rounded up when bit 15 is set, so that
// %hi + sign-extended %lo reconstructs the original value.
static uint16_t mipsHigh(uint64_t V) {
  uint64_t Adjusted = V + 0x8000;
  return static_cast<uint16_t>(Adjusted >> 16);
}
1349 
1350 template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
1351 static int64_t getPcRelocAddend(const uint8_t *Loc) {
1352   uint32_t Instr = read32<E>(Loc);
1353   uint32_t Mask = 0xffffffff >> (32 - BSIZE);
1354   return SignExtend64<BSIZE + SHIFT>((Instr & Mask) << SHIFT);
1355 }
1356 
// Apply a PC-relative MIPS relocation: write the low BSIZE bits of
// V >> SHIFT into the instruction at Loc, after range- and
// alignment-checking V.
template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
static void applyMipsPcReloc(uint8_t *Loc, uint32_t Type, uint64_t V) {
  uint32_t Mask = 0xffffffff >> (32 - BSIZE);
  uint32_t Instr = read32<E>(Loc);
  // A shifted field can only represent values aligned to 1 << SHIFT.
  if (SHIFT > 0)
    checkAlignment<(1 << SHIFT)>(V, Type);
  checkInt<BSIZE + SHIFT>(V, Type);
  write32<E>(Loc, (Instr & ~Mask) | ((V >> SHIFT) & Mask));
}
1366 
1367 template <endianness E>
1368 static void writeMipsHi16(uint8_t *Loc, uint64_t V) {
1369   uint32_t Instr = read32<E>(Loc);
1370   write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(V));
1371 }
1372 
1373 template <endianness E>
1374 static void writeMipsLo16(uint8_t *Loc, uint64_t V) {
1375   uint32_t Instr = read32<E>(Loc);
1376   write32<E>(Loc, (Instr & 0xffff0000) | (V & 0xffff));
1377 }
1378 
1379 template <endianness E> static int16_t readSignedLo16(const uint8_t *Loc) {
1380   return SignExtend32<16>(read32<E>(Loc) & 0xffff);
1381 }
1382 
// Write the 32-byte MIPS PLT header. It materializes the address of
// .got.plt[0] in $28, loads the word stored there into $25, computes the
// calling PLT entry's index in $24 and jumps via $25.
template <class ELFT>
void MipsTargetInfo<ELFT>::writePltZero(uint8_t *Buf) const {
  const endianness E = ELFT::TargetEndianness;
  write32<E>(Buf, 0x3c1c0000);      // lui   $28, %hi(&GOTPLT[0])
  write32<E>(Buf + 4, 0x8f990000);  // lw    $25, %lo(&GOTPLT[0])($28)
  write32<E>(Buf + 8, 0x279c0000);  // addiu $28, $28, %lo(&GOTPLT[0])
  write32<E>(Buf + 12, 0x031cc023); // subu  $24, $24, $28
  write32<E>(Buf + 16, 0x03e07825); // move  $15, $31
  write32<E>(Buf + 20, 0x0018c082); // srl   $24, $24, 2
  write32<E>(Buf + 24, 0x0320f809); // jalr  $25
  write32<E>(Buf + 28, 0x2718fffe); // subu  $24, $24, 2
  // Patch the %hi/%lo halves with the actual .got.plt address.
  uint64_t Got = Out<ELFT>::GotPlt->getVA();
  writeMipsHi16<E>(Buf, Got);
  writeMipsLo16<E>(Buf + 4, Got);
  writeMipsLo16<E>(Buf + 8, Got);
}
1399 
// Write one 16-byte MIPS PLT entry: load the callee address from the
// entry's .got.plt slot into $25 and jump, leaving the slot address in
// registers $15/$24 for the lazy resolution path.
template <class ELFT>
void MipsTargetInfo<ELFT>::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                    uint64_t PltEntryAddr, int32_t Index,
                                    unsigned RelOff) const {
  const endianness E = ELFT::TargetEndianness;
  write32<E>(Buf, 0x3c0f0000);      // lui   $15, %hi(.got.plt entry)
  write32<E>(Buf + 4, 0x8df90000);  // l[wd] $25, %lo(.got.plt entry)($15)
  write32<E>(Buf + 8, 0x03200008);  // jr    $25
  write32<E>(Buf + 12, 0x25f80000); // addiu $24, $15, %lo(.got.plt entry)
  // Patch the %hi/%lo halves with this entry's .got.plt slot address.
  writeMipsHi16<E>(Buf, GotEntryAddr);
  writeMipsLo16<E>(Buf + 4, GotEntryAddr);
  writeMipsLo16<E>(Buf + 12, GotEntryAddr);
}
1413 
template <class ELFT>
void MipsTargetInfo<ELFT>::writeThunk(uint8_t *Buf, uint64_t S) const {
  // Write MIPS LA25 thunk code to call PIC function from the non-PIC one.
  // The thunk loads the callee address S into $25 and jumps to it, since
  // PIC callees expect their own address in $25 ($t9) on entry (see the
  // ABI reference cited in needsThunk).
  const endianness E = ELFT::TargetEndianness;
  write32<E>(Buf, 0x3c190000);      // lui   $25, %hi(func)
  write32<E>(Buf + 4, 0x08000000);  // j     func
  write32<E>(Buf + 8, 0x27390000);  // addiu $25, $25, %lo(func)
  write32<E>(Buf + 12, 0x00000000); // nop
  writeMipsHi16<E>(Buf, S);
  // NOTE(review): S >> 2 is or'ed into the 26-bit j target field without
  // masking; this assumes the callee lies within the jump's 256MB region —
  // confirm callers guarantee that.
  write32<E>(Buf + 4, 0x08000000 | (S >> 2));
  writeMipsLo16<E>(Buf + 8, S);
}
1427 
// Return true if a call from File to symbol S via relocation Type must go
// through an LA25 thunk (see writeThunk).
template <class ELFT>
bool MipsTargetInfo<ELFT>::needsThunk(uint32_t Type, const InputFile &File,
                                      const SymbolBody &S) const {
  // Any MIPS PIC code function is invoked with its address in register $t9.
  // So if we have a branch instruction from non-PIC code to the PIC one
  // we cannot make the jump directly and need to create a small stubs
  // to save the target function address.
  // See page 3-38 ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
  if (Type != R_MIPS_26)
    return false;
  auto *F = dyn_cast<ELFFileBase<ELFT>>(&File);
  if (!F)
    return false;
  // If current file has PIC code, LA25 stub is not required.
  if (F->getObj().getHeader()->e_flags & EF_MIPS_PIC)
    return false;
  // Only calls to defined, section-backed symbols can need a thunk.
  auto *D = dyn_cast<DefinedRegular<ELFT>>(&S);
  if (!D || !D->Section)
    return false;
  // LA25 is required if target file has PIC code
  // or target symbol is a PIC symbol.
  return (D->Section->getFile()->getObj().getHeader()->e_flags & EF_MIPS_PIC) ||
         (D->StOther & STO_MIPS_MIPS16) == STO_MIPS_PIC;
}
1452 
// Read the addend stored implicitly in the relocated field at Buf (used
// when the relocation record itself carries no addend).
template <class ELFT>
uint64_t MipsTargetInfo<ELFT>::getImplicitAddend(const uint8_t *Buf,
                                                 uint32_t Type) const {
  const endianness E = ELFT::TargetEndianness;
  switch (Type) {
  default:
    return 0;
  case R_MIPS_32:
  case R_MIPS_GPREL32:
    // Full 32-bit addend.
    return read32<E>(Buf);
  case R_MIPS_26:
    // FIXME (simon): If the relocation target symbol is not a PLT entry
    // we should use another expression for calculation:
    // ((A << 2) | (P & 0xf0000000)) >> 2
    return SignExtend64<28>((read32<E>(Buf) & 0x3ffffff) << 2);
  case R_MIPS_GPREL16:
  case R_MIPS_LO16:
  case R_MIPS_PCLO16:
  case R_MIPS_TLS_DTPREL_HI16:
  case R_MIPS_TLS_DTPREL_LO16:
  case R_MIPS_TLS_TPREL_HI16:
  case R_MIPS_TLS_TPREL_LO16:
    // Signed 16-bit addend in the instruction's low half.
    return readSignedLo16<E>(Buf);
  // PC-relative addends: sign-extended shifted fields.
  case R_MIPS_PC16:
    return getPcRelocAddend<E, 16, 2>(Buf);
  case R_MIPS_PC19_S2:
    return getPcRelocAddend<E, 19, 2>(Buf);
  case R_MIPS_PC21_S2:
    return getPcRelocAddend<E, 21, 2>(Buf);
  case R_MIPS_PC26_S2:
    return getPcRelocAddend<E, 26, 2>(Buf);
  case R_MIPS_PC32:
    return getPcRelocAddend<E, 32, 0>(Buf);
  }
}
1488 
// Resolve a packed N64 relocation chain to a single effective relocation
// type and (possibly negated) value to apply.
static std::pair<uint32_t, uint64_t> calculateMips64RelChain(uint32_t Type,
                                                             uint64_t Val) {
  // MIPS N64 ABI packs multiple relocations into the single relocation
  // record. In general, all up to three relocations can have arbitrary
  // types. In fact, Clang and GCC uses only a few combinations. For now,
  // we support two of them. That is allow to pass at least all LLVM
  // test suite cases.
  // <any relocation> / R_MIPS_SUB / R_MIPS_HI16 | R_MIPS_LO16
  // <any relocation> / R_MIPS_64 / R_MIPS_NONE
  // The first relocation is a 'real' relocation which is calculated
  // using the corresponding symbol's value. The second and the third
  // relocations used to modify result of the first one: extend it to
  // 64-bit, extract high or low part etc. For details, see part 2.9 Relocation
  // at the https://dmz-portal.mips.com/mw/images/8/82/007-4658-001.pdf
  uint32_t Type2 = (Type >> 8) & 0xff;
  uint32_t Type3 = (Type >> 16) & 0xff;
  if (Type2 == R_MIPS_NONE && Type3 == R_MIPS_NONE)
    return std::make_pair(Type, Val);
  if (Type2 == R_MIPS_64 && Type3 == R_MIPS_NONE)
    return std::make_pair(Type2, Val);
  if (Type2 == R_MIPS_SUB && (Type3 == R_MIPS_HI16 || Type3 == R_MIPS_LO16))
    return std::make_pair(Type3, -Val);
  error("unsupported relocations combination " + Twine(Type));
  // Keep going with the first type so further errors can be reported.
  return std::make_pair(Type & 0xff, Val);
}
1514 
// Apply relocation Type at Loc with value Val, after adjusting TLS offsets
// and resolving N64 packed relocation chains.
template <class ELFT>
void MipsTargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint32_t Type,
                                       uint64_t Val) const {
  const endianness E = ELFT::TargetEndianness;
  // Thread pointer and DRP offsets from the start of TLS data area.
  // https://www.linux-mips.org/wiki/NPTL
  if (Type == R_MIPS_TLS_DTPREL_HI16 || Type == R_MIPS_TLS_DTPREL_LO16)
    Val -= 0x8000;
  else if (Type == R_MIPS_TLS_TPREL_HI16 || Type == R_MIPS_TLS_TPREL_LO16)
    Val -= 0x7000;
  if (ELFT::Is64Bits)
    std::tie(Type, Val) = calculateMips64RelChain(Type, Val);
  switch (Type) {
  case R_MIPS_32:
  case R_MIPS_GPREL32:
    write32<E>(Loc, Val);
    break;
  case R_MIPS_64:
    write64<E>(Loc, Val);
    break;
  case R_MIPS_26:
    // The 26-bit jump target field stores the word offset.
    write32<E>(Loc, (read32<E>(Loc) & ~0x3ffffff) | (Val >> 2));
    break;
  // The following types are range-checked before writing the low half;
  // the remaining %lo-style types below are intentionally unchecked.
  case R_MIPS_GOT_DISP:
  case R_MIPS_GOT_PAGE:
  case R_MIPS_GOT16:
  case R_MIPS_GPREL16:
    checkInt<16>(Val, Type);
  // fallthrough
  case R_MIPS_CALL16:
  case R_MIPS_GOT_OFST:
  case R_MIPS_LO16:
  case R_MIPS_PCLO16:
  case R_MIPS_TLS_DTPREL_LO16:
  case R_MIPS_TLS_TPREL_LO16:
    writeMipsLo16<E>(Loc, Val);
    break;
  case R_MIPS_HI16:
  case R_MIPS_PCHI16:
  case R_MIPS_TLS_DTPREL_HI16:
  case R_MIPS_TLS_TPREL_HI16:
    writeMipsHi16<E>(Loc, Val);
    break;
  case R_MIPS_JALR:
    // Ignore this optimization relocation for now
    break;
  case R_MIPS_PC16:
    applyMipsPcReloc<E, 16, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC19_S2:
    applyMipsPcReloc<E, 19, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC21_S2:
    applyMipsPcReloc<E, 21, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC26_S2:
    applyMipsPcReloc<E, 26, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC32:
    applyMipsPcReloc<E, 32, 0>(Loc, Type, Val);
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}
1580 
1581 template <class ELFT>
1582 bool MipsTargetInfo<ELFT>::usesOnlyLowPageBits(uint32_t Type) const {
1583   return Type == R_MIPS_LO16 || Type == R_MIPS_GOT_OFST;
1584 }
1585 }
1586 }
1587