//===- Target.cpp ---------------------------------------------------------===//
//
//                             The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Machine-specific things, such as applying relocations, creation of
// GOT or PLT entries, etc., are handled in this file.
//
// Refer to the ELF spec for the single-letter variables, S, A or P, used
// in this file. SA is S+A.
//
//===----------------------------------------------------------------------===//

#include "Target.h"
#include "Error.h"
#include "OutputSections.h"
#include "Symbols.h"

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Object/ELF.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Endian.h"

using namespace llvm;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::ELF;

namespace lld {
namespace elf2 {

std::unique_ptr<TargetInfo> Target;

template <endianness E> static void add32(void *P, int32_t V) {
  write32<E>(P, read32<E>(P) + V);
}

static void add32le(uint8_t *P, int32_t V) { add32<support::little>(P, V); }
static void or32le(uint8_t *P, int32_t V) { write32le(P, read32le(P) | V); }

template <unsigned N> static void checkInt(int64_t V, uint32_t Type) {
  if (isInt<N>(V))
    return;
  StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
  error("Relocation " + S + " out of range");
}

template <unsigned N> static void checkUInt(uint64_t V, uint32_t Type) {
  if (isUInt<N>(V))
    return;
  StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
  error("Relocation " + S + " out of range");
}

template <unsigned N> static void checkIntUInt(uint64_t V, uint32_t Type) {
  if (isInt<N>(V) || isUInt<N>(V))
    return;
  StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
  error("Relocation " + S + " out of range");
}

template <unsigned N> static void checkAlignment(uint64_t V, uint32_t Type) {
  if ((V & (N - 1)) == 0)
    return;
  StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
  error("Improper alignment for relocation " + S);
}

template <class ELFT> bool isGnuIFunc(const SymbolBody &S) {
  if (auto *SS = dyn_cast<DefinedElf<ELFT>>(&S))
    return SS->Sym.getType() == STT_GNU_IFUNC;
  return false;
}

namespace {
class X86TargetInfo final : public TargetInfo {
public:
  X86TargetInfo();
  void writeGotPltHeader(uint8_t *Buf) const override;
  unsigned getDynRel(unsigned Type) const override;
  unsigned getTlsGotRel(unsigned Type) const override;
  bool isTlsDynRel(unsigned Type, const SymbolBody &S) const override;
  void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZero(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  bool needsCopyRel(uint32_t Type, const SymbolBody &S) const override;
  bool needsDynRelative(unsigned Type) const override;
  bool needsGot(uint32_t Type, const SymbolBody &S) const override;
  bool needsPlt(uint32_t Type, const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
  bool canRelaxTls(unsigned Type, const SymbolBody *S) const override;
  unsigned relaxTls(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                    uint64_t SA, const SymbolBody *S) const override;
  bool isGotRelative(uint32_t Type) const override;

private:
  void relocateTlsLdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsGdToIe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsGdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsIeToLe(unsigned Type, uint8_t *Loc, uint8_t *BufEnd,
                         uint64_t P, uint64_t SA) const;
};

class X86_64TargetInfo final : public TargetInfo {
public:
  X86_64TargetInfo();
  bool isTlsDynRel(unsigned Type, const SymbolBody &S) const override;
  void writeGotPltHeader(uint8_t *Buf) const override;
  void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZero(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  bool needsCopyRel(uint32_t Type, const SymbolBody &S) const override;
  bool needsGot(uint32_t Type, const SymbolBody &S) const override;
  bool needsPlt(uint32_t Type, const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
  bool isRelRelative(uint32_t Type) const override;
  bool canRelaxTls(unsigned Type, const SymbolBody *S) const override;
  bool isSizeRel(uint32_t Type) const override;
  unsigned relaxTls(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                    uint64_t SA, const SymbolBody *S) const override;

private:
  void relocateTlsLdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsGdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsGdToIe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsIeToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
};

class PPCTargetInfo final : public TargetInfo {
public:
  PPCTargetInfo();
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
  bool isRelRelative(uint32_t Type) const override;
};

class PPC64TargetInfo final : public TargetInfo {
public:
  PPC64TargetInfo();
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  bool needsGot(uint32_t Type, const SymbolBody &S) const override;
  bool needsPlt(uint32_t Type, const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
  bool isRelRelative(uint32_t Type) const override;
};

class AArch64TargetInfo final : public TargetInfo {
public:
  AArch64TargetInfo();
  unsigned getDynRel(unsigned Type) const override;
  void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZero(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  unsigned getTlsGotRel(unsigned Type = -1) const override;
  bool isTlsDynRel(unsigned Type, const SymbolBody &S) const override;
  bool needsCopyRel(uint32_t Type, const SymbolBody &S) const override;
  bool needsGot(uint32_t Type, const SymbolBody &S) const override;
  bool needsPlt(uint32_t Type, const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
};

class AMDGPUTargetInfo final : public TargetInfo {
public:
  AMDGPUTargetInfo() {}
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
};

template <class ELFT> class MipsTargetInfo final : public TargetInfo {
public:
  MipsTargetInfo();
  unsigned getDynRel(unsigned Type) const override;
  void writeGotHeader(uint8_t *Buf) const override;
  bool needsGot(uint32_t Type, const SymbolBody &S) const override;
  bool needsPlt(uint32_t Type, const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
  bool isHintRel(uint32_t Type) const override;
  bool isRelRelative(uint32_t Type) const override;
};
} // anonymous namespace

TargetInfo *createTarget() {
  switch (Config->EMachine) {
  case EM_386:
    return new X86TargetInfo();
  case EM_AARCH64:
    return new AArch64TargetInfo();
  case EM_AMDGPU:
    return new AMDGPUTargetInfo();
  case EM_MIPS:
    switch (Config->EKind) {
    case ELF32LEKind:
      return new MipsTargetInfo<ELF32LE>();
    case ELF32BEKind:
      return new MipsTargetInfo<ELF32BE>();
    default:
      fatal("Unsupported MIPS target");
    }
  case EM_PPC:
    return new PPCTargetInfo();
  case EM_PPC64:
    return new PPC64TargetInfo();
  case EM_X86_64:
    return new X86_64TargetInfo();
  }
  fatal("Unknown target machine");
}

TargetInfo::~TargetInfo() {}

bool TargetInfo::canRelaxTls(unsigned Type, const SymbolBody *S) const {
  return false;
}
uint64_t TargetInfo::getVAStart() const {
  return Config->Shared ? 0 : VAStart;
}

bool TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const {
  return false;
}

bool TargetInfo::isTlsLocalDynamicRel(unsigned Type) const {
  return Type == TlsLocalDynamicRel;
}

bool TargetInfo::isTlsGlobalDynamicRel(unsigned Type) const {
  return Type == TlsGlobalDynamicRel;
}

bool TargetInfo::isTlsDynRel(unsigned Type, const SymbolBody &S) const {
  return false;
}

bool TargetInfo::isGotRelative(uint32_t Type) const { return false; }
bool TargetInfo::isHintRel(uint32_t Type) const { return false; }
bool TargetInfo::isRelRelative(uint32_t Type) const { return true; }
bool TargetInfo::isSizeRel(uint32_t Type) const { return false; }

bool TargetInfo::needsGot(uint32_t Type, const SymbolBody &S) const {
  return false;
}

bool TargetInfo::needsPlt(uint32_t Type, const SymbolBody &S) const {
  return false;
}

unsigned TargetInfo::relaxTls(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                              uint64_t P, uint64_t SA,
                              const SymbolBody *S) const {
  return 0;
}

X86TargetInfo::X86TargetInfo() {
  CopyRel = R_386_COPY;
  GotRel = R_386_GLOB_DAT;
  PltRel = R_386_JUMP_SLOT;
  IRelativeRel = R_386_IRELATIVE;
  RelativeRel = R_386_RELATIVE;
  TlsGotRel = R_386_TLS_TPOFF;
  TlsGlobalDynamicRel = R_386_TLS_GD;
  TlsLocalDynamicRel = R_386_TLS_LDM;
  TlsModuleIndexRel = R_386_TLS_DTPMOD32;
  TlsOffsetRel = R_386_TLS_DTPOFF32;
  UseLazyBinding = true;
  PltEntrySize = 16;
  PltZeroSize = 16;
}

void X86TargetInfo::writeGotPltHeader(uint8_t *Buf) const {
  write32le(Buf, Out<ELF32LE>::Dynamic->getVA());
}

void X86TargetInfo::writeGotPlt(uint8_t *Buf, uint64_t Plt) const {
  // Entries in .got.plt initially point back to the corresponding
  // PLT entries with a fixed offset to skip the first instruction.
  write32le(Buf, Plt + 6);
}

unsigned X86TargetInfo::getDynRel(unsigned Type) const {
  if (Type == R_386_TLS_LE)
    return R_386_TLS_TPOFF;
  if (Type == R_386_TLS_LE_32)
    return R_386_TLS_TPOFF32;
  return Type;
}

unsigned X86TargetInfo::getTlsGotRel(unsigned Type) const {
  if (Type == R_386_TLS_IE)
    return Type;
  return TlsGotRel;
}

bool X86TargetInfo::isTlsDynRel(unsigned Type, const SymbolBody &S) const {
  if (Type == R_386_TLS_LE || Type == R_386_TLS_LE_32 ||
      Type == R_386_TLS_GOTIE)
    return Config->Shared;
  if (Type == R_386_TLS_IE)
    return canBePreempted(&S, true);
  return Type == R_386_TLS_GD;
}

void X86TargetInfo::writePltZero(uint8_t *Buf) const {
  // Executable files and shared object files have
  // separate procedure linkage tables.
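  // In the usual ELF lazy-binding scheme, PLT0 pushes the value of the
  // second GOT slot (the link map) and jumps through the third slot, which
  // the dynamic loader fills at startup with the address of its lazy
  // resolver. The PIC variant below reaches those slots via %ebx (which
  // holds the GOT base); the non-PIC variant uses absolute addresses.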
  if (Config->Shared) {
    const uint8_t V[] = {
        0xff, 0xb3, 0x04, 0x00, 0x00, 0x00, // pushl 4(%ebx)
        0xff, 0xa3, 0x08, 0x00, 0x00, 0x00, // jmp *8(%ebx)
        0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
    };
    memcpy(Buf, V, sizeof(V));
    return;
  }

  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushl (GOT+4)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *(GOT+8)
      0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint32_t Got = Out<ELF32LE>::GotPlt->getVA();
  write32le(Buf + 2, Got + 4);
  write32le(Buf + 8, Got + 8);
}

void X86TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                             uint64_t PltEntryAddr, int32_t Index,
                             unsigned RelOff) const {
  const uint8_t Inst[] = {
      0xff, 0x00, 0x00, 0x00, 0x00, 0x00, // jmp *foo_in_GOT|*foo@GOT(%ebx)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushl $reloc_offset
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmp .PLT0@PC
  };
  memcpy(Buf, Inst, sizeof(Inst));

  // jmp *foo@GOT(%ebx) or jmp *foo_in_GOT
  Buf[1] = Config->Shared ? 0xa3 : 0x25;
  uint32_t Got = UseLazyBinding ? Out<ELF32LE>::GotPlt->getVA()
                                : Out<ELF32LE>::Got->getVA();
  write32le(Buf + 2, Config->Shared ? GotEntryAddr - Got : GotEntryAddr);
  write32le(Buf + 7, RelOff);
  write32le(Buf + 12, -Index * PltEntrySize - PltZeroSize - 16);
}

bool X86TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const {
  if (Type == R_386_32 || Type == R_386_16 || Type == R_386_8)
    if (auto *SS = dyn_cast<SharedSymbol<ELF32LE>>(&S))
      return SS->Sym.getType() == STT_OBJECT;
  return false;
}

bool X86TargetInfo::needsGot(uint32_t Type, const SymbolBody &S) const {
  if (S.isTls() && Type == R_386_TLS_GD)
    return Target->canRelaxTls(Type, &S) && canBePreempted(&S, true);
  if (Type == R_386_TLS_GOTIE || Type == R_386_TLS_IE)
    return !canRelaxTls(Type, &S);
  return Type == R_386_GOT32 || needsPlt(Type, S);
}

bool X86TargetInfo::needsPlt(uint32_t Type, const SymbolBody &S) const {
  return isGnuIFunc<ELF32LE>(S) ||
         (Type == R_386_PLT32 && canBePreempted(&S, true)) ||
         (Type == R_386_PC32 && S.isShared());
}

bool X86TargetInfo::isGotRelative(uint32_t Type) const {
  // This relocation does not require a GOT entry, but it is relative
  // to the GOT, so the GOT needs to be created. Here we request that.
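  // For example, in
  //   movl foo@GOTOFF(%ebx), %eax
  // the relocation computes S + A - GOT: foo itself gets no GOT slot, but
  // the GOT base address must still be defined for the expression to make
  // sense.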
  return Type == R_386_GOTOFF;
}

void X86TargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                                uint64_t P, uint64_t SA, uint64_t ZA,
                                uint8_t *PairedLoc) const {
  switch (Type) {
  case R_386_32:
    add32le(Loc, SA);
    break;
  case R_386_GOT32: {
    uint64_t V = SA - Out<ELF32LE>::Got->getVA() -
                 Out<ELF32LE>::Got->getNumEntries() * 4;
    checkInt<32>(V, Type);
    add32le(Loc, V);
    break;
  }
  case R_386_GOTOFF:
    add32le(Loc, SA - Out<ELF32LE>::Got->getVA());
    break;
  case R_386_GOTPC:
    add32le(Loc, SA + Out<ELF32LE>::Got->getVA() - P);
    break;
  case R_386_PC32:
  case R_386_PLT32:
    add32le(Loc, SA - P);
    break;
  case R_386_TLS_GD:
  case R_386_TLS_LDM:
  case R_386_TLS_TPOFF: {
    uint64_t V = SA - Out<ELF32LE>::Got->getVA() -
                 Out<ELF32LE>::Got->getNumEntries() * 4;
    checkInt<32>(V, Type);
    write32le(Loc, V);
    break;
  }
  case R_386_TLS_IE:
  case R_386_TLS_LDO_32:
    write32le(Loc, SA);
    break;
  case R_386_TLS_LE:
    write32le(Loc, SA - Out<ELF32LE>::TlsPhdr->p_memsz);
    break;
  case R_386_TLS_LE_32:
    write32le(Loc, Out<ELF32LE>::TlsPhdr->p_memsz - SA);
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}

bool X86TargetInfo::canRelaxTls(unsigned Type, const SymbolBody *S) const {
  if (Config->Shared || (S && !S->isTls()))
    return false;
  return Type == R_386_TLS_LDO_32 || Type == R_386_TLS_LDM ||
         Type == R_386_TLS_GD ||
         (Type == R_386_TLS_IE && !canBePreempted(S, true)) ||
         (Type == R_386_TLS_GOTIE && !canBePreempted(S, true));
}

bool X86TargetInfo::needsDynRelative(unsigned Type) const {
  return Config->Shared && Type == R_386_TLS_IE;
}

unsigned X86TargetInfo::relaxTls(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                                 uint64_t P, uint64_t SA,
                                 const SymbolBody *S) const {
  switch (Type) {
  case R_386_TLS_GD:
    if (canBePreempted(S, true))
      relocateTlsGdToIe(Loc, BufEnd, P, SA);
    else
      relocateTlsGdToLe(Loc, BufEnd, P, SA);
    // The next relocation should be against __tls_get_addr, so skip it
    return 1;
  case R_386_TLS_GOTIE:
  case R_386_TLS_IE:
    relocateTlsIeToLe(Type, Loc, BufEnd, P, SA);
    return 0;
  case R_386_TLS_LDM:
    relocateTlsLdToLe(Loc, BufEnd, P, SA);
    // The next relocation should be against __tls_get_addr, so skip it
    return 1;
  case R_386_TLS_LDO_32:
    relocateOne(Loc, BufEnd, R_386_TLS_LE, P, SA);
    return 0;
  }
  llvm_unreachable("Unknown TLS optimization");
}

// "Ulrich Drepper, ELF Handling For Thread-Local Storage" (5.1
// IA-32 Linker Optimizations, http://www.akkadia.org/drepper/tls.pdf) shows
// how GD can be optimized to IE:
//   leal x@tlsgd(, %ebx, 1), %eax
//   call __tls_get_addr@plt
// Is converted to:
//   movl %gs:0, %eax
//   addl x@gotntpoff(%ebx), %eax
void X86TargetInfo::relocateTlsGdToIe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                                      uint64_t SA) const {
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x03, 0x83, 0x00, 0x00, 0x00, 0x00  // addl 0(%ebx), %eax
  };
  memcpy(Loc - 3, Inst, sizeof(Inst));
  relocateOne(Loc + 5, BufEnd, R_386_32, P,
              SA - Out<ELF32LE>::Got->getVA() -
                  Out<ELF32LE>::Got->getNumEntries() * 4);
}

// GD can be optimized to LE:
//   leal x@tlsgd(, %ebx, 1), %eax
//   call __tls_get_addr@plt
// Can be converted to:
//   movl %gs:0, %eax
//   addl $x@ntpoff, %eax
// But gold emits "subl $foo@tpoff, %eax" instead of addl.
// The two forms compute the same value, and this method generates
// subl to be consistent with gold.
void X86TargetInfo::relocateTlsGdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                                      uint64_t SA) const {
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x81, 0xe8, 0x00, 0x00, 0x00, 0x00  // subl $x@tpoff, %eax
  };
  memcpy(Loc - 3, Inst, sizeof(Inst));
  relocateOne(Loc + 5, BufEnd, R_386_32, P,
              Out<ELF32LE>::TlsPhdr->p_memsz - SA);
}

// LD can be optimized to LE:
//   leal foo(%reg), %eax
//   call ___tls_get_addr
// Is converted to:
//   movl %gs:0, %eax
//   nop
//   leal 0(%esi,1), %esi
void X86TargetInfo::relocateTlsLdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                                      uint64_t SA) const {
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0,%eax
      0x90,                               // nop
      0x8d, 0x74, 0x26, 0x00              // leal 0(%esi,1),%esi
  };
  memcpy(Loc - 2, Inst, sizeof(Inst));
}

// In some conditions, relocations can be optimized to avoid using GOT.
// This function does that for the Initial Exec to Local Exec case.
// Read "ELF Handling For Thread-Local Storage, 5.1
// IA-32 Linker Optimizations" (http://www.akkadia.org/drepper/tls.pdf)
// by Ulrich Drepper for details.
void X86TargetInfo::relocateTlsIeToLe(unsigned Type, uint8_t *Loc,
                                      uint8_t *BufEnd, uint64_t P,
                                      uint64_t SA) const {
  // Ulrich's document section 6.2 says that @gotntpoff can
  // be used with MOVL or ADDL instructions.
  // @indntpoff is similar to @gotntpoff, but for use in
  // position dependent code.
  uint8_t *Inst = Loc - 2;
  uint8_t *Op = Loc - 1;
  uint8_t Reg = (Loc[-1] >> 3) & 7;
  bool IsMov = *Inst == 0x8b;
  if (Type == R_386_TLS_IE) {
    // For the R_386_TLS_IE relocation we perform the following
    // transformations:
    //   MOVL foo@INDNTPOFF, %EAX  is transformed to  MOVL $foo, %EAX
    //   MOVL foo@INDNTPOFF, %REG  is transformed to  MOVL $foo, %REG
    //   ADDL foo@INDNTPOFF, %REG  is transformed to  ADDL $foo, %REG
    // The first one is special because when EAX is used the sequence is
    // 5 bytes long, otherwise it is 6 bytes.
    if (*Op == 0xa1) {
      *Op = 0xb8;
    } else {
      *Inst = IsMov ? 0xc7 : 0x81;
      *Op = 0xc0 | ((*Op >> 3) & 7);
    }
  } else {
    // The R_386_TLS_GOTIE relocation can be optimized to
    // R_386_TLS_LE so that it does not use GOT.
    // "MOVL foo@GOTTPOFF(%RIP), %REG" is transformed to "MOVL $foo, %REG".
    // "ADDL foo@GOTNTPOFF(%RIP), %REG" is transformed to
    // "LEAL foo(%REG), %REG".
    // Note: gold converts to ADDL instead of LEAL.
    *Inst = IsMov ? 0xc7 : 0x8d;
    if (IsMov)
      *Op = 0xc0 | ((*Op >> 3) & 7);
    else
      *Op = 0x80 | Reg | (Reg << 3);
  }
  relocateOne(Loc, BufEnd, R_386_TLS_LE, P, SA);
}

X86_64TargetInfo::X86_64TargetInfo() {
  CopyRel = R_X86_64_COPY;
  GotRel = R_X86_64_GLOB_DAT;
  PltRel = R_X86_64_JUMP_SLOT;
  RelativeRel = R_X86_64_RELATIVE;
  IRelativeRel = R_X86_64_IRELATIVE;
  TlsGotRel = R_X86_64_TPOFF64;
  TlsLocalDynamicRel = R_X86_64_TLSLD;
  TlsGlobalDynamicRel = R_X86_64_TLSGD;
  TlsModuleIndexRel = R_X86_64_DTPMOD64;
  TlsOffsetRel = R_X86_64_DTPOFF64;
  UseLazyBinding = true;
  PltEntrySize = 16;
  PltZeroSize = 16;
}

void X86_64TargetInfo::writeGotPltHeader(uint8_t *Buf) const {
  write64le(Buf, Out<ELF64LE>::Dynamic->getVA());
}

void X86_64TargetInfo::writeGotPlt(uint8_t *Buf, uint64_t Plt) const {
  // See comments in X86TargetInfo::writeGotPlt.
  write32le(Buf, Plt + 6);
}

void X86_64TargetInfo::writePltZero(uint8_t *Buf) const {
  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushq GOT+8(%rip)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *GOT+16(%rip)
      0x0f, 0x1f, 0x40, 0x00              // nopl 0x0(%rax)
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint64_t Got = Out<ELF64LE>::GotPlt->getVA();
  uint64_t Plt = Out<ELF64LE>::Plt->getVA();
  write32le(Buf + 2, Got - Plt + 2); // GOT+8
  write32le(Buf + 8, Got - Plt + 4); // GOT+16
}

void X86_64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                uint64_t PltEntryAddr, int32_t Index,
                                unsigned RelOff) const {
  const uint8_t Inst[] = {
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmpq *got(%rip)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushq <relocation index>
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmpq plt[0]
  };
  memcpy(Buf, Inst, sizeof(Inst));

  write32le(Buf + 2, GotEntryAddr - PltEntryAddr - 6);
  write32le(Buf + 7, Index);
  write32le(Buf + 12, -Index * PltEntrySize - PltZeroSize - 16);
}

bool X86_64TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const {
  if (Type == R_X86_64_32S || Type == R_X86_64_32 || Type == R_X86_64_PC32 ||
      Type == R_X86_64_64)
    if (auto *SS = dyn_cast<SharedSymbol<ELF64LE>>(&S))
      return SS->Sym.getType() == STT_OBJECT;
  return false;
}

bool X86_64TargetInfo::needsGot(uint32_t Type, const SymbolBody &S) const {
  if (Type == R_X86_64_TLSGD)
    return Target->canRelaxTls(Type, &S) && canBePreempted(&S, true);
  if (Type == R_X86_64_GOTTPOFF)
    return !canRelaxTls(Type, &S);
  return Type == R_X86_64_GOTPCREL || needsPlt(Type, S);
}

bool X86_64TargetInfo::isTlsDynRel(unsigned Type, const SymbolBody &S) const {
  return Type == R_X86_64_GOTTPOFF || Type == R_X86_64_TLSGD;
}

bool X86_64TargetInfo::needsPlt(uint32_t Type, const SymbolBody &S) const {
  if (needsCopyRel(Type, S))
    return false;
  if (isGnuIFunc<ELF64LE>(S))
    return true;

  switch (Type) {
  default:
    return false;
  case R_X86_64_32:
  case R_X86_64_64:
  case R_X86_64_PC32:
    // This relocation is defined to have a value of (S + A - P).
    // The problems start when a non-PIC program calls a function in a
    // shared library.
    // In an ideal world, we could just report an error saying the relocation
    // can overflow at runtime.
    // In the real world with glibc, crt1.o has a R_X86_64_PC32 pointing to
    // libc.so.
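    // (That relocation is typically the call to __libc_start_main.)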
    //
    // The general idea on how to handle such cases is to create a PLT entry
    // and use that as the function value.
    //
    // For the static linking part, we just return true and everything else
    // will use the PLT entry as the address.
    //
    // The remaining (unimplemented) problem is making sure pointer equality
    // still works. We need the help of the dynamic linker for that. We
    // let it know that we have a direct reference to a shared object symbol
    // by creating an undefined symbol with a non-zero st_value. Seeing that,
    // the dynamic linker resolves the symbol to the value of the symbol we
    // created. This is true even for GOT entries, so pointer equality is
    // maintained. To avoid an infinite loop, the only entry that points to
    // the real function is a dedicated GOT entry used by the PLT. That is
    // identified by special relocation types (R_X86_64_JUMP_SLOT,
    // R_386_JMP_SLOT, etc).
    return S.isShared();
  case R_X86_64_PLT32:
    return canBePreempted(&S, true);
  }
}

bool X86_64TargetInfo::isRelRelative(uint32_t Type) const {
  switch (Type) {
  default:
    return false;
  case R_X86_64_DTPOFF32:
  case R_X86_64_DTPOFF64:
  case R_X86_64_PC8:
  case R_X86_64_PC16:
  case R_X86_64_PC32:
  case R_X86_64_PC64:
  case R_X86_64_PLT32:
    return true;
  }
}

bool X86_64TargetInfo::isSizeRel(uint32_t Type) const {
  return Type == R_X86_64_SIZE32 || Type == R_X86_64_SIZE64;
}

bool X86_64TargetInfo::canRelaxTls(unsigned Type, const SymbolBody *S) const {
  if (Config->Shared || (S && !S->isTls()))
    return false;
  return Type == R_X86_64_TLSGD || Type == R_X86_64_TLSLD ||
         Type == R_X86_64_DTPOFF32 ||
         (Type == R_X86_64_GOTTPOFF && !canBePreempted(S, true));
}

// "Ulrich Drepper, ELF Handling For Thread-Local Storage" (5.5
// x86-x64 linker optimizations, http://www.akkadia.org/drepper/tls.pdf) shows
// how LD can be optimized to LE:
//   leaq bar@tlsld(%rip), %rdi
//   callq __tls_get_addr@PLT
//   leaq bar@dtpoff(%rax), %rcx
// Is converted to:
//   .word 0x6666
//   .byte 0x66
//   mov %fs:0, %rax
//   leaq bar@tpoff(%rax), %rcx
void X86_64TargetInfo::relocateTlsLdToLe(uint8_t *Loc, uint8_t *BufEnd,
                                         uint64_t P, uint64_t SA) const {
  const uint8_t Inst[] = {
      0x66, 0x66,                                          // .word 0x6666
      0x66,                                                // .byte 0x66
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
  };
  memcpy(Loc - 3, Inst, sizeof(Inst));
}

// "Ulrich Drepper, ELF Handling For Thread-Local Storage" (5.5
// x86-x64 linker optimizations, http://www.akkadia.org/drepper/tls.pdf) shows
// how GD can be optimized to LE:
//   .byte 0x66
//   leaq x@tlsgd(%rip), %rdi
//   .word 0x6666
//   rex64
//   call __tls_get_addr@plt
// Is converted to:
//   mov %fs:0x0, %rax
//   lea x@tpoff(%rax), %rax
void X86_64TargetInfo::relocateTlsGdToLe(uint8_t *Loc, uint8_t *BufEnd,
                                         uint64_t P, uint64_t SA) const {
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0,%rax
      0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00 // lea x@tpoff(%rax),%rax
  };
  memcpy(Loc - 4, Inst, sizeof(Inst));
  relocateOne(Loc + 8, BufEnd, R_X86_64_TPOFF32, P, SA);
}

// "Ulrich Drepper, ELF Handling For Thread-Local Storage" (5.5
// x86-x64 linker optimizations, http://www.akkadia.org/drepper/tls.pdf) shows
// how GD can be optimized to IE:
//   .byte 0x66
//   leaq x@tlsgd(%rip), %rdi
//   .word 0x6666
//   rex64
//   call __tls_get_addr@plt
// Is converted to:
//   mov %fs:0x0, %rax
//   addq x@gottpoff(%rip), %rax
void X86_64TargetInfo::relocateTlsGdToIe(uint8_t *Loc, uint8_t *BufEnd,
                                         uint64_t P, uint64_t SA) const {
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0,%rax
      0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00 // addq x@gottpoff(%rip),%rax
  };
  memcpy(Loc - 4, Inst, sizeof(Inst));
  relocateOne(Loc + 8, BufEnd, R_X86_64_TPOFF64, P + 12, SA);
}

// In some conditions, R_X86_64_GOTTPOFF relocation can be optimized to
// R_X86_64_TPOFF32 so that it does not use GOT.
// This function does that. Read "ELF Handling For Thread-Local Storage,
// 5.5 x86-x64 linker optimizations" (http://www.akkadia.org/drepper/tls.pdf)
// by Ulrich Drepper for details.
void X86_64TargetInfo::relocateTlsIeToLe(uint8_t *Loc, uint8_t *BufEnd,
                                         uint64_t P, uint64_t SA) const {
  // Ulrich's document section 6.5 says that @gottpoff(%rip) must be
  // used in MOVQ or ADDQ instructions only.
  // "MOVQ foo@GOTTPOFF(%RIP), %REG" is transformed to "MOVQ $foo, %REG".
  // "ADDQ foo@GOTTPOFF(%RIP), %REG" is transformed to "LEAQ foo(%REG), %REG"
  // (if the register is not RSP/R12) or "ADDQ $foo, %RSP".
  // Opcodes info can be found at http://ref.x86asm.net/coder64.html#x48.
  uint8_t *Prefix = Loc - 3;
  uint8_t *Inst = Loc - 2;
  uint8_t *RegSlot = Loc - 1;
  uint8_t Reg = Loc[-1] >> 3;
  bool IsMov = *Inst == 0x8b;
  bool RspAdd = !IsMov && Reg == 4;
  // The r12 and rsp registers require special handling.
  // The problem is that for other registers, e.g.
  // leaq 0xXXXXXXXX(%r11),%r11, the result is 7 bytes
  // (4d 8d 9b XX XX XX XX), but leaq 0xXXXXXXXX(%r12),%r12 is 8 bytes
  // (4d 8d a4 24 XX XX XX XX). The same is true for rsp. So we convert
  // to addq for them, saving the 1 byte that we don't have.
  if (RspAdd)
    *Inst = 0x81;
  else
    *Inst = IsMov ? 0xc7 : 0x8d;
  if (*Prefix == 0x4c)
    *Prefix = (IsMov || RspAdd) ? 0x49 : 0x4d;
  *RegSlot = (IsMov || RspAdd) ? (0xc0 | Reg) : (0x80 | Reg | (Reg << 3));
  relocateOne(Loc, BufEnd, R_X86_64_TPOFF32, P, SA);
}

// This function applies a TLS relocation with an optimization as described
// in Ulrich Drepper's document. As a result of rewriting instructions at the
// relocation target, relocations that immediately follow the TLS relocation
// (and would otherwise be applied to the rewritten instructions) may have to
// be skipped. This function returns the number of relocations that need to
// be skipped.
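// For example, the GD code sequence carries, in addition to the
// R_X86_64_TLSGD relocation, a relocation against __tls_get_addr for the
// call instruction; once the sequence has been rewritten, that call no
// longer exists, so the relocation against it must be skipped (hence the
// "return 1" cases below).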
unsigned X86_64TargetInfo::relaxTls(uint8_t *Loc, uint8_t *BufEnd,
                                    uint32_t Type, uint64_t P, uint64_t SA,
                                    const SymbolBody *S) const {
  switch (Type) {
  case R_X86_64_DTPOFF32:
    relocateOne(Loc, BufEnd, R_X86_64_TPOFF32, P, SA);
    return 0;
  case R_X86_64_GOTTPOFF:
    relocateTlsIeToLe(Loc, BufEnd, P, SA);
    return 0;
  case R_X86_64_TLSGD: {
    if (canBePreempted(S, true))
      relocateTlsGdToIe(Loc, BufEnd, P, SA);
    else
      relocateTlsGdToLe(Loc, BufEnd, P, SA);
    // The next relocation should be against __tls_get_addr, so skip it
    return 1;
  }
  case R_X86_64_TLSLD:
    relocateTlsLdToLe(Loc, BufEnd, P, SA);
    // The next relocation should be against __tls_get_addr, so skip it
    return 1;
  }
  llvm_unreachable("Unknown TLS optimization");
}

void X86_64TargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                                   uint64_t P, uint64_t SA, uint64_t ZA,
                                   uint8_t *PairedLoc) const {
  switch (Type) {
  case R_X86_64_32:
    checkUInt<32>(SA, Type);
    write32le(Loc, SA);
    break;
  case R_X86_64_32S:
    checkInt<32>(SA, Type);
    write32le(Loc, SA);
    break;
  case R_X86_64_64:
    write64le(Loc, SA);
    break;
  case R_X86_64_DTPOFF32:
    write32le(Loc, SA);
    break;
  case R_X86_64_GOTPCREL:
  case R_X86_64_PC32:
  case R_X86_64_PLT32:
  case R_X86_64_TLSGD:
  case R_X86_64_TLSLD:
    write32le(Loc, SA - P);
    break;
  case R_X86_64_SIZE32:
    write32le(Loc, ZA);
    break;
  case R_X86_64_SIZE64:
    write64le(Loc, ZA);
    break;
  case R_X86_64_TPOFF32: {
    uint64_t Val = SA - Out<ELF64LE>::TlsPhdr->p_memsz;
    checkInt<32>(Val, Type);
    write32le(Loc, Val);
    break;
  }
  case R_X86_64_TPOFF64:
    write32le(Loc, SA - P);
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}

// Relocation masks following the #lo(value), #hi(value), #ha(value),
// #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1 (Relocation Types) of the PPC-elf64abi
// document.
static uint16_t applyPPCLo(uint64_t V) { return V; }
static uint16_t applyPPCHi(uint64_t V) { return V >> 16; }
static uint16_t applyPPCHa(uint64_t V) { return (V + 0x8000) >> 16; }
static uint16_t applyPPCHigher(uint64_t V) { return V >> 32; }
static uint16_t applyPPCHighera(uint64_t V) { return (V + 0x8000) >> 32; }
static uint16_t applyPPCHighest(uint64_t V) { return V >> 48; }
static uint16_t applyPPCHighesta(uint64_t V) { return (V + 0x8000) >> 48; }

PPCTargetInfo::PPCTargetInfo() {}
bool PPCTargetInfo::isRelRelative(uint32_t Type) const { return false; }

void PPCTargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                                uint64_t P, uint64_t SA, uint64_t ZA,
                                uint8_t *PairedLoc) const {
  switch (Type) {
  case R_PPC_ADDR16_HA:
    write16be(Loc, applyPPCHa(SA));
    break;
  case R_PPC_ADDR16_LO:
    write16be(Loc, applyPPCLo(SA));
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}

PPC64TargetInfo::PPC64TargetInfo() {
  GotRel = R_PPC64_GLOB_DAT;
  RelativeRel = R_PPC64_RELATIVE;
  PltEntrySize = 32;

  // We need 64K pages (at least under glibc/Linux, the loader won't
  // set different permissions on a finer granularity than that).
  PageSize = 65536;

  // The PPC64 ELF ABI v1 spec says:
  //
  //   It is normally desirable to put segments with different
  //   characteristics in separate 256 Mbyte portions of the address space,
  //   to give the operating system full paging flexibility in the 64-bit
  //   address space.
  //
  // And because the lowest non-zero 256M boundary is 0x10000000, PPC64
  // linkers use 0x10000000 as the starting address.
  VAStart = 0x10000000;
}

uint64_t getPPC64TocBase() {
  // The TOC consists of sections .got, .toc, .tocbss, .plt in that
  // order. The TOC starts where the first of these sections starts.

  // FIXME: This obviously does not do the right thing when there is no .got
  // section, but there is a .toc or .tocbss section.
  uint64_t TocVA = Out<ELF64BE>::Got->getVA();
  if (!TocVA)
    TocVA = Out<ELF64BE>::Plt->getVA();

  // Per the ppc64-elf-linux ABI, the TOC base is the TOC value plus 0x8000,
  // thus permitting a full 64 KB segment. Note that the glibc startup
  // code (crt1.o) assumes that you can get from the TOC base to the
  // start of the .toc section with only a single (signed) 16-bit relocation.
  return TocVA + 0x8000;
}

void PPC64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                               uint64_t PltEntryAddr, int32_t Index,
                               unsigned RelOff) const {
  uint64_t Off = GotEntryAddr - getPPC64TocBase();

  // FIXME: What we should do, in theory, is get the offset of the function
  // descriptor in the .opd section, and use that as the offset from %r2 (the
  // TOC-base pointer). Instead, we have the GOT-entry offset, and that will
  // be a pointer to the function descriptor in the .opd section. Using this
  // scheme is simpler, but requires an extra indirection per PLT dispatch.

  write32be(Buf, 0xf8410028);                       // std %r2, 40(%r1)
  write32be(Buf + 4, 0x3d620000 | applyPPCHa(Off)); // addis %r11, %r2, X@ha
  write32be(Buf + 8, 0xe98b0000 | applyPPCLo(Off)); // ld %r12, X@l(%r11)
  write32be(Buf + 12, 0xe96c0000);                  // ld %r11,0(%r12)
  write32be(Buf + 16, 0x7d6903a6);                  // mtctr %r11
  write32be(Buf + 20, 0xe84c0008);                  // ld %r2,8(%r12)
  write32be(Buf + 24, 0xe96c0010);                  // ld %r11,16(%r12)
  write32be(Buf + 28, 0x4e800420);                  // bctr
}

bool PPC64TargetInfo::needsGot(uint32_t Type, const SymbolBody &S) const {
  if (needsPlt(Type, S))
    return true;

  switch (Type) {
  default: return false;
  case R_PPC64_GOT16:
  case R_PPC64_GOT16_DS:
  case R_PPC64_GOT16_HA:
  case R_PPC64_GOT16_HI:
  case R_PPC64_GOT16_LO:
  case R_PPC64_GOT16_LO_DS:
    return true;
  }
}

bool PPC64TargetInfo::needsPlt(uint32_t Type, const SymbolBody &S) const {
  // These are function calls that need to be redirected through a PLT stub.
  return Type == R_PPC64_REL24 && canBePreempted(&S, false);
}

bool PPC64TargetInfo::isRelRelative(uint32_t Type) const {
  switch (Type) {
  default:
    return true;
  case R_PPC64_ADDR64:
  case R_PPC64_TOC:
    return false;
  }
}

void PPC64TargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                                  uint64_t P, uint64_t SA, uint64_t ZA,
                                  uint8_t *PairedLoc) const {
  uint64_t TB = getPPC64TocBase();

  // For a TOC-relative relocation, adjust the addend and proceed in terms of
  // the corresponding ADDR16 relocation type.
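  // A worked example of the @ha/@l split used below (illustrative, not
  // from the original comments): @ha rounds up to compensate for the sign
  // extension of @l. For SA - TB = 0x18000, @ha = (0x18000 + 0x8000) >> 16
  // = 0x2 and @l = 0x8000, which sign-extends to -0x8000 in a D-form
  // instruction; (0x2 << 16) + (-0x8000) = 0x18000, reconstructing the
  // original offset.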
  switch (Type) {
  case R_PPC64_TOC16:       Type = R_PPC64_ADDR16;       SA -= TB; break;
  case R_PPC64_TOC16_DS:    Type = R_PPC64_ADDR16_DS;    SA -= TB; break;
  case R_PPC64_TOC16_HA:    Type = R_PPC64_ADDR16_HA;    SA -= TB; break;
  case R_PPC64_TOC16_HI:    Type = R_PPC64_ADDR16_HI;    SA -= TB; break;
  case R_PPC64_TOC16_LO:    Type = R_PPC64_ADDR16_LO;    SA -= TB; break;
  case R_PPC64_TOC16_LO_DS: Type = R_PPC64_ADDR16_LO_DS; SA -= TB; break;
  default: break;
  }

  switch (Type) {
  case R_PPC64_ADDR14: {
    checkAlignment<4>(SA, Type);
    // Preserve the AA/LK bits in the branch instruction
    uint8_t AALK = Loc[3];
    write16be(Loc + 2, (AALK & 3) | (SA & 0xfffc));
    break;
  }
  case R_PPC64_ADDR16:
    checkInt<16>(SA, Type);
    write16be(Loc, SA);
    break;
  case R_PPC64_ADDR16_DS:
    checkInt<16>(SA, Type);
    write16be(Loc, (read16be(Loc) & 3) | (SA & ~3));
    break;
  case R_PPC64_ADDR16_HA:
    write16be(Loc, applyPPCHa(SA));
    break;
  case R_PPC64_ADDR16_HI:
    write16be(Loc, applyPPCHi(SA));
    break;
  case R_PPC64_ADDR16_HIGHER:
    write16be(Loc, applyPPCHigher(SA));
    break;
  case R_PPC64_ADDR16_HIGHERA:
    write16be(Loc, applyPPCHighera(SA));
    break;
  case R_PPC64_ADDR16_HIGHEST:
    write16be(Loc, applyPPCHighest(SA));
    break;
  case R_PPC64_ADDR16_HIGHESTA:
    write16be(Loc, applyPPCHighesta(SA));
    break;
  case R_PPC64_ADDR16_LO:
    write16be(Loc, applyPPCLo(SA));
    break;
  case R_PPC64_ADDR16_LO_DS:
    write16be(Loc, (read16be(Loc) & 3) | (applyPPCLo(SA) & ~3));
    break;
  case R_PPC64_ADDR32:
    checkInt<32>(SA, Type);
    write32be(Loc, SA);
    break;
  case R_PPC64_ADDR64:
    write64be(Loc, SA);
    break;
  case R_PPC64_REL16_HA:
    write16be(Loc, applyPPCHa(SA - P));
    break;
  case R_PPC64_REL16_HI:
    write16be(Loc, applyPPCHi(SA - P));
    break;
  case R_PPC64_REL16_LO:
    write16be(Loc, applyPPCLo(SA - P));
    break;
  case R_PPC64_REL24: {
    // If we have an undefined weak symbol, we might get here with a symbol
    // address of zero. That could overflow, but the code must be unreachable,
    // so don't bother doing anything at all.
    if (!SA)
      break;

    uint64_t PltStart = Out<ELF64BE>::Plt->getVA();
    uint64_t PltEnd = PltStart + Out<ELF64BE>::Plt->getSize();
    bool InPlt = PltStart <= SA && SA < PltEnd;

    if (!InPlt && Out<ELF64BE>::Opd) {
      // If this is a local call, and we currently have the address of a
      // function-descriptor, get the underlying code address instead.
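      // (Background: in the ELFv1 ABI, an .opd entry is a function
      // descriptor of three doublewords: the code entry point, the TOC
      // base, and an environment pointer. The read64be below extracts the
      // code address from the descriptor's first doubleword.)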
      uint64_t OpdStart = Out<ELF64BE>::Opd->getVA();
      uint64_t OpdEnd = OpdStart + Out<ELF64BE>::Opd->getSize();
      bool InOpd = OpdStart <= SA && SA < OpdEnd;

      if (InOpd)
        SA = read64be(&Out<ELF64BE>::OpdBuf[SA - OpdStart]);
    }

    uint32_t Mask = 0x03FFFFFC;
    checkInt<24>(SA - P, Type);
    write32be(Loc, (read32be(Loc) & ~Mask) | ((SA - P) & Mask));

    uint32_t Nop = 0x60000000;
    if (InPlt && Loc + 8 <= BufEnd && read32be(Loc + 4) == Nop)
      write32be(Loc + 4, 0xe8410028); // ld %r2, 40(%r1)
    break;
  }
  case R_PPC64_REL32:
    checkInt<32>(SA - P, Type);
    write32be(Loc, SA - P);
    break;
  case R_PPC64_REL64:
    write64be(Loc, SA - P);
    break;
  case R_PPC64_TOC:
    write64be(Loc, SA);
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}

AArch64TargetInfo::AArch64TargetInfo() {
  CopyRel = R_AARCH64_COPY;
  IRelativeRel = R_AARCH64_IRELATIVE;
  GotRel = R_AARCH64_GLOB_DAT;
  PltRel = R_AARCH64_JUMP_SLOT;
  TlsGotRel = R_AARCH64_TLS_TPREL64;
  UseLazyBinding = true;
  PltEntrySize = 16;
  PltZeroSize = 32;
}

unsigned AArch64TargetInfo::getDynRel(unsigned Type) const {
  if (Type == R_AARCH64_ABS32 || Type == R_AARCH64_ABS64)
    return Type;
  StringRef S = getELFRelocationTypeName(EM_AARCH64, Type);
  error("Relocation " + S + " cannot be used when making a shared object; "
        "recompile with -fPIC.");
  // Keep it going with a dummy value so that we can find more reloc errors.
  return R_AARCH64_ABS32;
}

void AArch64TargetInfo::writeGotPlt(uint8_t *Buf, uint64_t Plt) const {
  write64le(Buf, Out<ELF64LE>::Plt->getVA());
}

void AArch64TargetInfo::writePltZero(uint8_t *Buf) const {
  const uint8_t PltData[] = {
      0xf0, 0x7b, 0xbf, 0xa9, // stp x16, x30, [sp,#-16]!
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[2]))
      0x11, 0x02, 0x40, 0xf9, // ldr x17, [x16, Offset(&(.plt.got[2]))]
      0x10, 0x02, 0x00, 0x91, // add x16, x16, Offset(&(.plt.got[2]))
      0x20, 0x02, 0x1f, 0xd6, // br x17
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5  // nop
  };
  memcpy(Buf, PltData, sizeof(PltData));

  uint64_t Got = Out<ELF64LE>::GotPlt->getVA();
  uint64_t Plt = Out<ELF64LE>::Plt->getVA();
  relocateOne(Buf + 4, Buf + 8, R_AARCH64_ADR_PREL_PG_HI21, Plt + 4, Got + 16);
  relocateOne(Buf + 8, Buf + 12, R_AARCH64_LDST64_ABS_LO12_NC, Plt + 8,
              Got + 16);
  relocateOne(Buf + 12, Buf + 16, R_AARCH64_ADD_ABS_LO12_NC, Plt + 12,
              Got + 16);
}

void AArch64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                 uint64_t PltEntryAddr, int32_t Index,
                                 unsigned RelOff) const {
  const uint8_t Inst[] = {
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[n]))
      0x11, 0x02, 0x40, 0xf9, // ldr x17, [x16, Offset(&(.plt.got[n]))]
      0x10, 0x02, 0x00, 0x91, // add x16, x16, Offset(&(.plt.got[n]))
      0x20, 0x02, 0x1f, 0xd6  // br x17
  };
  memcpy(Buf, Inst, sizeof(Inst));

  relocateOne(Buf, Buf + 4, R_AARCH64_ADR_PREL_PG_HI21, PltEntryAddr,
              GotEntryAddr);
  relocateOne(Buf + 4, Buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, PltEntryAddr + 4,
              GotEntryAddr);
  relocateOne(Buf + 8, Buf + 12, R_AARCH64_ADD_ABS_LO12_NC, PltEntryAddr + 8,
              GotEntryAddr);
}

unsigned AArch64TargetInfo::getTlsGotRel(unsigned Type) const {
  if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
      Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC)
    return Type;
  return TlsGotRel;
}

bool AArch64TargetInfo::isTlsDynRel(unsigned Type, const SymbolBody &S) const {
  return Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
         Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
}

bool AArch64TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const {
  if (Config->Shared)
    return false;
  switch (Type) {
  default:
    return false;
  case R_AARCH64_ABS16:
  case R_AARCH64_ABS32:
  case R_AARCH64_ABS64:
  case R_AARCH64_ADD_ABS_LO12_NC:
  case R_AARCH64_ADR_PREL_LO21:
  case R_AARCH64_ADR_PREL_PG_HI21:
  case R_AARCH64_LDST8_ABS_LO12_NC:
  case R_AARCH64_LDST16_ABS_LO12_NC:
  case R_AARCH64_LDST32_ABS_LO12_NC:
  case R_AARCH64_LDST64_ABS_LO12_NC:
  case R_AARCH64_LDST128_ABS_LO12_NC:
    if (auto *SS = dyn_cast<SharedSymbol<ELF64LE>>(&S))
      return SS->Sym.getType() == STT_OBJECT;
    return false;
  }
}

bool AArch64TargetInfo::needsGot(uint32_t Type, const SymbolBody &S) const {
  switch (Type) {
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
  case R_AARCH64_ADR_GOT_PAGE:
  case R_AARCH64_LD64_GOT_LO12_NC:
    return true;
  default:
    return needsPlt(Type, S);
  }
}

bool AArch64TargetInfo::needsPlt(uint32_t Type, const SymbolBody &S) const {
  if (isGnuIFunc<ELF64LE>(S))
    return true;
  switch (Type) {
  default:
    return false;
  case R_AARCH64_CALL26:
  case R_AARCH64_CONDBR19:
  case R_AARCH64_JUMP26:
  case R_AARCH64_TSTBR14:
    return canBePreempted(&S, true);
  }
}

static void updateAArch64Adr(uint8_t *L, uint64_t Imm) {
  uint32_t ImmLo = (Imm & 0x3) << 29;
  uint32_t ImmHi = ((Imm & 0x1FFFFC) >> 2) << 5;
  uint64_t Mask = (0x3 << 29) | (0x7FFFF << 5);
  write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
}

// Page(Expr) is the page address of the expression Expr, defined
// as (Expr & ~0xFFF). (This applies even if the machine page size
// supported by the platform has a different value.)
static uint64_t getAArch64Page(uint64_t Expr) {
  return Expr & (~static_cast<uint64_t>(0xFFF));
}

void AArch64TargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd,
                                    uint32_t Type, uint64_t P, uint64_t SA,
                                    uint64_t ZA, uint8_t *PairedLoc) const {
  switch (Type) {
  case R_AARCH64_ABS16:
    checkIntUInt<16>(SA, Type);
    write16le(Loc, SA);
    break;
  case R_AARCH64_ABS32:
    checkIntUInt<32>(SA, Type);
    write32le(Loc, SA);
    break;
  case R_AARCH64_ABS64:
    write64le(Loc, SA);
    break;
  case R_AARCH64_ADD_ABS_LO12_NC:
    // This relocation stores 12 bits and there's no instruction
    // to do it. Instead, we do a 32-bit store of the value
    // of r_addend bitwise-or'ed into Loc. This assumes that the
    // addend bits in Loc are zero.
    or32le(Loc, (SA & 0xFFF) << 10);
    break;
  case R_AARCH64_ADR_GOT_PAGE: {
    uint64_t X = getAArch64Page(SA) - getAArch64Page(P);
    checkInt<33>(X, Type);
    updateAArch64Adr(Loc, (X >> 12) & 0x1FFFFF); // X[32:12]
    break;
  }
  case R_AARCH64_ADR_PREL_LO21: {
    uint64_t X = SA - P;
    checkInt<21>(X, Type);
    updateAArch64Adr(Loc, X & 0x1FFFFF);
    break;
  }
  case R_AARCH64_ADR_PREL_PG_HI21:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: {
    uint64_t X = getAArch64Page(SA) - getAArch64Page(P);
    checkInt<33>(X, Type);
    updateAArch64Adr(Loc, (X >> 12) & 0x1FFFFF); // X[32:12]
    break;
  }
  case R_AARCH64_CALL26:
  case R_AARCH64_JUMP26: {
    uint64_t X = SA - P;
    checkInt<28>(X, Type);
    or32le(Loc, (X & 0x0FFFFFFC) >> 2);
    break;
  }
  case R_AARCH64_CONDBR19: {
    uint64_t X = SA - P;
    checkInt<21>(X, Type);
    or32le(Loc, (X & 0x1FFFFC) << 3);
    break;
  }
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    checkAlignment<8>(SA, Type);
    or32le(Loc, (SA & 0xFF8) << 7);
    break;
  case R_AARCH64_LDST128_ABS_LO12_NC:
    or32le(Loc, (SA & 0x0FF8) << 6);
    break;
  case R_AARCH64_LDST16_ABS_LO12_NC:
    or32le(Loc, (SA & 0x0FFC) << 9);
    break;
  case R_AARCH64_LDST8_ABS_LO12_NC:
    or32le(Loc, (SA & 0xFFF) << 10);
    break;
  case R_AARCH64_LDST32_ABS_LO12_NC:
    or32le(Loc, (SA & 0xFFC) << 8);
    break;
  case R_AARCH64_LDST64_ABS_LO12_NC:
    or32le(Loc, (SA & 0xFF8) << 7);
    break;
  case R_AARCH64_PREL16:
    checkIntUInt<16>(SA - P, Type);
    write16le(Loc, SA - P);
    break;
  case R_AARCH64_PREL32:
    checkIntUInt<32>(SA - P, Type);
    write32le(Loc, SA - P);
    break;
  case R_AARCH64_PREL64:
    write64le(Loc, SA - P);
    break;
  case R_AARCH64_TSTBR14: {
    uint64_t X = SA - P;
    checkInt<16>(X, Type);
    or32le(Loc, (X & 0xFFFC) << 3);
    break;
  }
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}

// Implementing relocations for AMDGPU is low priority since most
// programs don't use relocations now. Thus, this function is not
// actually called (relocateOne would be called once for each
// relocation, but there are none).
// That's why the AMDGPU port works without implementing this function.
void AMDGPUTargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                                   uint64_t P, uint64_t SA, uint64_t ZA,
                                   uint8_t *PairedLoc) const {
  llvm_unreachable("not implemented");
}

template <class ELFT> MipsTargetInfo<ELFT>::MipsTargetInfo() {
  PageSize = 65536;
  GotHeaderEntriesNum = 2;
  RelativeRel = R_MIPS_REL32;
}

template <class ELFT>
unsigned MipsTargetInfo<ELFT>::getDynRel(unsigned Type) const {
  if (Type == R_MIPS_32 || Type == R_MIPS_64)
    return R_MIPS_REL32;
  StringRef S = getELFRelocationTypeName(EM_MIPS, Type);
  error("Relocation " + S + " cannot be used when making a shared object; "
        "recompile with -fPIC.");
  // Keep it going with a dummy value so that we can find more reloc errors.
  return R_MIPS_32;
}

template <class ELFT>
void MipsTargetInfo<ELFT>::writeGotHeader(uint8_t *Buf) const {
  typedef typename ELFFile<ELFT>::Elf_Off Elf_Off;
  typedef typename ELFFile<ELFT>::uintX_t uintX_t;

  // Set the MSB of the second GOT slot. This is not required by any
  // MIPS ABI documentation, though.
  //
  // There is a comment in glibc saying that "The MSB of got[1] of a
  // gnu object is set to identify gnu objects," and in GNU gold it
  // says "the second entry will be used by some runtime loaders".
  // But how this field is being used is unclear.
  //
  // We are not really willing to mimic other linkers' behaviors
  // without understanding why they do that, but because all files
  // generated by GNU tools have this special GOT value, and because
  // we've been doing this for years, it is probably a safe bet to
  // keep doing this for now. We really need to revisit this to see
  // whether we actually have to do this.
  auto *P = reinterpret_cast<Elf_Off *>(Buf);
  P[1] = uintX_t(1) << (ELFT::Is64Bits ? 63 : 31);
}

template <class ELFT>
bool MipsTargetInfo<ELFT>::needsGot(uint32_t Type, const SymbolBody &S) const {
  return Type == R_MIPS_GOT16 || Type == R_MIPS_CALL16;
}

template <class ELFT>
bool MipsTargetInfo<ELFT>::needsPlt(uint32_t Type, const SymbolBody &S) const {
  return false;
}

static uint16_t mipsHigh(uint64_t V) { return (V + 0x8000) >> 16; }

template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
static void applyMipsPcReloc(uint8_t *Loc, uint32_t Type, uint64_t P,
                             uint64_t SA) {
  uint32_t Mask = 0xffffffff >> (32 - BSIZE);
  uint32_t Instr = read32<E>(Loc);
  int64_t A = SignExtend64<BSIZE + SHIFT>((Instr & Mask) << SHIFT);
  if (SHIFT > 0)
    checkAlignment<(1 << SHIFT)>(SA + A, Type);
  int64_t V = SA + A - P;
  checkInt<BSIZE + SHIFT>(V, Type);
  write32<E>(Loc, (Instr & ~Mask) | ((V >> SHIFT) & Mask));
}

template <class ELFT>
void MipsTargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint8_t *BufEnd,
                                       uint32_t Type, uint64_t P, uint64_t SA,
                                       uint64_t ZA, uint8_t *PairedLoc) const {
  const endianness E = ELFT::TargetEndianness;
  switch (Type) {
  case R_MIPS_32:
    add32<E>(Loc, SA);
    break;
  case R_MIPS_CALL16:
  case R_MIPS_GOT16: {
    int64_t V = SA - getMipsGpAddr<ELFT>();
    if (Type == R_MIPS_GOT16)
      checkInt<16>(V, Type);
    write32<E>(Loc, (read32<E>(Loc) & 0xffff0000) | (V & 0xffff));
    break;
  }
  case R_MIPS_GPREL16: {
    uint32_t Instr = read32<E>(Loc);
    int64_t V = SA + SignExtend64<16>(Instr & 0xffff) - getMipsGpAddr<ELFT>();
    checkInt<16>(V, Type);
    write32<E>(Loc, (Instr & 0xffff0000) | (V & 0xffff));
    break;
  }
  case R_MIPS_GPREL32:
    write32<E>(Loc, SA + int32_t(read32<E>(Loc)) - getMipsGpAddr<ELFT>());
    break;
  case R_MIPS_HI16: {
    uint32_t Instr = read32<E>(Loc);
    if (PairedLoc) {
      uint64_t AHL = ((Instr & 0xffff) << 16) +
                     SignExtend64<16>(read32<E>(PairedLoc) & 0xffff);
      write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(SA + AHL));
    } else {
      warning("Can't find matching R_MIPS_LO16 relocation for R_MIPS_HI16");
      write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(SA));
    }
    break;
  }
  case R_MIPS_JALR:
    // Ignore this optimization relocation for now
    break;
  case R_MIPS_LO16: {
    uint32_t Instr = read32<E>(Loc);
    int64_t AHL = SignExtend64<16>(Instr & 0xffff);
    write32<E>(Loc, (Instr & 0xffff0000) | ((SA + AHL) & 0xffff));
    break;
  }
  case R_MIPS_PC16:
    applyMipsPcReloc<E, 16, 2>(Loc, Type, P, SA);
    break;
  case R_MIPS_PC19_S2:
    applyMipsPcReloc<E, 19, 2>(Loc, Type, P, SA);
    break;
  case R_MIPS_PC21_S2:
    applyMipsPcReloc<E, 21, 2>(Loc, Type, P, SA);
    break;
  case R_MIPS_PC26_S2:
    applyMipsPcReloc<E, 26, 2>(Loc, Type, P, SA);
    break;
  case R_MIPS_PC32:
    applyMipsPcReloc<E, 32, 0>(Loc, Type, P, SA);
    break;
  case R_MIPS_PCHI16: {
    uint32_t Instr = read32<E>(Loc);
    if (PairedLoc) {
      uint64_t AHL = ((Instr & 0xffff) << 16) +
                     SignExtend64<16>(read32<E>(PairedLoc) & 0xffff);
      write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(SA + AHL - P));
    } else {
      warning("Can't find matching R_MIPS_PCLO16 relocation for R_MIPS_PCHI16");
      write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(SA - P));
    }
    break;
  }
  case R_MIPS_PCLO16: {
    uint32_t Instr = read32<E>(Loc);
    int64_t AHL = SignExtend64<16>(Instr & 0xffff);
    write32<E>(Loc, (Instr & 0xffff0000) | ((SA + AHL - P) & 0xffff));
    break;
  }
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}

template <class ELFT>
bool MipsTargetInfo<ELFT>::isHintRel(uint32_t Type) const {
  return Type == R_MIPS_JALR;
}

template <class ELFT>
bool MipsTargetInfo<ELFT>::isRelRelative(uint32_t Type) const {
  switch (Type) {
  default:
    return false;
  case R_MIPS_PC16:
  case R_MIPS_PC19_S2:
  case R_MIPS_PC21_S2:
  case R_MIPS_PC26_S2:
  case R_MIPS_PC32:
  case R_MIPS_PCHI16:
  case R_MIPS_PCLO16:
    return true;
  }
}

// _gp is a MIPS-specific ABI-defined symbol which points to
// a location that is relative to the GOT. This function returns
// the value for the symbol.
template <class ELFT> typename ELFFile<ELFT>::uintX_t getMipsGpAddr() {
  unsigned GPOffset = 0x7ff0;
  if (uint64_t V = Out<ELFT>::Got->getVA())
    return V + GPOffset;
  return 0;
}

template bool isGnuIFunc<ELF32LE>(const SymbolBody &S);
template bool isGnuIFunc<ELF32BE>(const SymbolBody &S);
template bool isGnuIFunc<ELF64LE>(const SymbolBody &S);
template bool isGnuIFunc<ELF64BE>(const SymbolBody &S);

template uint32_t getMipsGpAddr<ELF32LE>();
template uint32_t getMipsGpAddr<ELF32BE>();
template uint64_t getMipsGpAddr<ELF64LE>();
template uint64_t getMipsGpAddr<ELF64BE>();
} // namespace elf2
} // namespace lld