1 //===- Target.cpp ---------------------------------------------------------===// 2 // 3 // The LLVM Linker 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // Machine-specific things, such as applying relocations, creation of 11 // GOT or PLT entries, etc., are handled in this file. 12 // 13 // Refer the ELF spec for the single letter varaibles, S, A or P, used 14 // in this file. SA is S+A. 15 // 16 //===----------------------------------------------------------------------===// 17 18 #include "Target.h" 19 #include "Error.h" 20 #include "OutputSections.h" 21 #include "Symbols.h" 22 23 #include "llvm/ADT/ArrayRef.h" 24 #include "llvm/Object/ELF.h" 25 #include "llvm/Support/Endian.h" 26 #include "llvm/Support/ELF.h" 27 28 using namespace llvm; 29 using namespace llvm::object; 30 using namespace llvm::support::endian; 31 using namespace llvm::ELF; 32 33 namespace lld { 34 namespace elf2 { 35 36 std::unique_ptr<TargetInfo> Target; 37 38 template <endianness E> static void add32(void *P, int32_t V) { 39 write32<E>(P, read32<E>(P) + V); 40 } 41 42 static void add32le(uint8_t *P, int32_t V) { add32<support::little>(P, V); } 43 static void or32le(uint8_t *P, int32_t V) { write32le(P, read32le(P) | V); } 44 45 template <unsigned N> static void checkInt(int64_t V, uint32_t Type) { 46 if (isInt<N>(V)) 47 return; 48 StringRef S = getELFRelocationTypeName(Config->EMachine, Type); 49 fatal("Relocation " + S + " out of range"); 50 } 51 52 template <unsigned N> static void checkUInt(uint64_t V, uint32_t Type) { 53 if (isUInt<N>(V)) 54 return; 55 StringRef S = getELFRelocationTypeName(Config->EMachine, Type); 56 fatal("Relocation " + S + " out of range"); 57 } 58 59 template <unsigned N> static void checkIntUInt(uint64_t V, uint32_t Type) { 60 if (isInt<N>(V) || isUInt<N>(V)) 61 return; 62 StringRef S = getELFRelocationTypeName(Config->EMachine, Type); 63 fatal("Relocation " + S + " out of range"); 64 } 65 66 template <unsigned N> static void checkAlignment(uint64_t V, uint32_t Type) { 67 if ((V & (N - 1)) == 0) 68 return; 69 StringRef S = getELFRelocationTypeName(Config->EMachine, Type); 70 fatal("Improper alignment for relocation " + S); 71 } 72 73 template <class ELFT> bool isGnuIFunc(const SymbolBody &S) { 74 if (auto *SS = dyn_cast<DefinedElf<ELFT>>(&S)) 75 return SS->Sym.getType() == STT_GNU_IFUNC; 76 return false; 77 } 78 79 namespace { 80 class X86TargetInfo final : public TargetInfo { 81 public: 82 X86TargetInfo(); 83 void writeGotPltHeader(uint8_t *Buf) const override; 84 unsigned getDynRel(unsigned Type) const override; 85 unsigned getTlsGotRel(unsigned Type) const override; 86 bool isTlsDynRel(unsigned Type, const SymbolBody &S) const override; 87 void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override; 88 void writePltZero(uint8_t *Buf) const override; 89 void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr, 90 int32_t Index, unsigned RelOff) const override; 91 bool needsCopyRel(uint32_t Type, const SymbolBody &S) const override; 92 bool needsDynRelative(unsigned Type) const override; 93 bool needsGot(uint32_t Type, const SymbolBody &S) const override; 94 bool needsPlt(uint32_t Type, const SymbolBody &S) const override; 95 void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P, 96 uint64_t SA, uint64_t ZA = 0, 97 uint8_t *PairedLoc = nullptr) const override; 98 bool canRelaxTls(unsigned Type, const SymbolBody *S) const override; 99 unsigned relaxTls(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P, 100 uint64_t SA, const SymbolBody *S) const override; 101 bool isGotRelative(uint32_t Type) const override; 102 103 private: 104 void relocateTlsLdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P, 105 uint64_t SA) const; 106 void relocateTlsGdToIe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P, 107 uint64_t SA) const; 108 void relocateTlsGdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P, 109 uint64_t SA) const; 110 void relocateTlsIeToLe(unsigned Type, uint8_t *Loc, uint8_t *BufEnd, 111 uint64_t P, uint64_t SA) const; 112 }; 113 114 class X86_64TargetInfo final : public TargetInfo { 115 public: 116 X86_64TargetInfo(); 117 bool isTlsDynRel(unsigned Type, const SymbolBody &S) const override; 118 void writeGotPltHeader(uint8_t *Buf) const override; 119 void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override; 120 void writePltZero(uint8_t *Buf) const override; 121 void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr, 122 int32_t Index, unsigned RelOff) const override; 123 bool needsCopyRel(uint32_t Type, const SymbolBody &S) const override; 124 bool needsGot(uint32_t Type, const SymbolBody &S) const override; 125 bool needsPlt(uint32_t Type, const SymbolBody &S) const override; 126 void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P, 127 uint64_t SA, uint64_t ZA = 0, 128 uint8_t *PairedLoc = nullptr) const override; 129 bool isRelRelative(uint32_t Type) const override; 130 bool canRelaxTls(unsigned Type, const SymbolBody *S) const override; 131 bool isSizeRel(uint32_t Type) const override; 132 unsigned relaxTls(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P, 133 uint64_t SA, const SymbolBody *S) const override; 134 135 private: 136 void relocateTlsLdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P, 137 uint64_t SA) const; 138 void relocateTlsGdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P, 139 uint64_t SA) const; 140 void relocateTlsGdToIe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P, 141 uint64_t SA) const; 142 void relocateTlsIeToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P, 143 uint64_t SA) const; 144 }; 145 146 class PPCTargetInfo final : public TargetInfo { 147 public: 148 PPCTargetInfo(); 149 void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P, 150 uint64_t SA, uint64_t ZA = 0, 151 uint8_t *PairedLoc = nullptr) const override; 152 bool isRelRelative(uint32_t Type) const override; 153 }; 154 155 class PPC64TargetInfo final : public TargetInfo { 156 public: 157 PPC64TargetInfo(); 158 void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr, 159 int32_t Index, unsigned RelOff) const override; 160 bool needsGot(uint32_t Type, const SymbolBody &S) const override; 161 bool needsPlt(uint32_t Type, const SymbolBody &S) const override; 162 void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P, 163 uint64_t SA, uint64_t ZA = 0, 164 uint8_t *PairedLoc = nullptr) const override; 165 bool isRelRelative(uint32_t Type) const override; 166 }; 167 168 class AArch64TargetInfo final : public TargetInfo { 169 public: 170 AArch64TargetInfo(); 171 unsigned getDynRel(unsigned Type) const override; 172 void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override; 173 void writePltZero(uint8_t *Buf) const override; 174 void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr, 175 int32_t Index, unsigned RelOff) const override; 176 unsigned getTlsGotRel(unsigned Type = -1) const override; 177 bool isTlsDynRel(unsigned Type, const SymbolBody &S) const override; 178 bool needsCopyRel(uint32_t Type, const SymbolBody &S) const override; 179 bool needsGot(uint32_t Type, const SymbolBody &S) const override; 180 bool needsPlt(uint32_t Type, const SymbolBody &S) const override; 181 void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P, 182 uint64_t SA, uint64_t ZA = 0, 183 uint8_t *PairedLoc = nullptr) const override; 184 }; 185 186 class AMDGPUTargetInfo final : public TargetInfo { 187 public: 188 AMDGPUTargetInfo() {} 189 void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P, 190 uint64_t SA, uint64_t ZA = 0, 191 uint8_t *PairedLoc = nullptr) const override; 192 }; 193 194 template <class ELFT> class MipsTargetInfo final : public TargetInfo { 195 public: 196 MipsTargetInfo(); 197 unsigned getDynRel(unsigned Type) const override; 198 void writeGotHeader(uint8_t *Buf) const override; 199 bool needsGot(uint32_t Type, const SymbolBody &S) const override; 200 bool needsPlt(uint32_t Type, const SymbolBody &S) const override; 201 void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P, 202 uint64_t SA, uint64_t ZA = 0, 203 uint8_t *PairedLoc = nullptr) const override; 204 bool isHintRel(uint32_t Type) const override; 205 bool isRelRelative(uint32_t Type) const override; 206 }; 207 } // anonymous namespace 208 209 TargetInfo *createTarget() { 210 switch (Config->EMachine) { 211 case EM_386: 212 return new X86TargetInfo(); 213 case EM_AARCH64: 214 return new AArch64TargetInfo(); 215 case EM_AMDGPU: 216 return new AMDGPUTargetInfo(); 217 case EM_MIPS: 218 switch (Config->EKind) { 219 case ELF32LEKind: 220 return new MipsTargetInfo<ELF32LE>(); 221 case ELF32BEKind: 222 return new MipsTargetInfo<ELF32BE>(); 223 default: 224 fatal("Unsupported MIPS target"); 225 } 226 case EM_PPC: 227 return new PPCTargetInfo(); 228 case EM_PPC64: 229 return new PPC64TargetInfo(); 230 case EM_X86_64: 231 return new X86_64TargetInfo(); 232 } 233 fatal("Unknown target machine"); 234 } 235 236 TargetInfo::~TargetInfo() {} 237 238 bool TargetInfo::canRelaxTls(unsigned Type, const SymbolBody *S) const { 239 return false; 240 } 241 242 uint64_t TargetInfo::getVAStart() const { return Config->Shared ? 0 : VAStart; } 243 244 bool TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const { 245 return false; 246 } 247 248 bool TargetInfo::isTlsLocalDynamicRel(unsigned Type) const { 249 return Type == TlsLocalDynamicRel; 250 } 251 252 bool TargetInfo::isTlsGlobalDynamicRel(unsigned Type) const { 253 return Type == TlsGlobalDynamicRel; 254 } 255 256 bool TargetInfo::isTlsDynRel(unsigned Type, const SymbolBody &S) const { 257 return false; 258 } 259 260 bool TargetInfo::isGotRelative(uint32_t Type) const { return false; } 261 bool TargetInfo::isHintRel(uint32_t Type) const { return false; } 262 bool TargetInfo::isRelRelative(uint32_t Type) const { return true; } 263 bool TargetInfo::isSizeRel(uint32_t Type) const { return false; } 264 265 bool TargetInfo::needsGot(uint32_t Type, const SymbolBody &S) const { 266 return false; 267 } 268 269 bool TargetInfo::needsPlt(uint32_t Type, const SymbolBody &S) const { 270 return false; 271 } 272 273 unsigned TargetInfo::relaxTls(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, 274 uint64_t P, uint64_t SA, 275 const SymbolBody *S) const { 276 return 0; 277 } 278 279 X86TargetInfo::X86TargetInfo() { 280 CopyRel = R_386_COPY; 281 GotRel = R_386_GLOB_DAT; 282 PltRel = R_386_JUMP_SLOT; 283 IRelativeRel = R_386_IRELATIVE; 284 RelativeRel = R_386_RELATIVE; 285 TlsGotRel = R_386_TLS_TPOFF; 286 TlsGlobalDynamicRel = R_386_TLS_GD; 287 TlsLocalDynamicRel = R_386_TLS_LDM; 288 TlsModuleIndexRel = R_386_TLS_DTPMOD32; 289 TlsOffsetRel = R_386_TLS_DTPOFF32; 290 UseLazyBinding = true; 291 PltEntrySize = 16; 292 PltZeroSize = 16; 293 } 294 295 void X86TargetInfo::writeGotPltHeader(uint8_t *Buf) const { 296 write32le(Buf, Out<ELF32LE>::Dynamic->getVA()); 297 } 298 299 void X86TargetInfo::writeGotPlt(uint8_t *Buf, uint64_t Plt) const { 300 // Entries in .got.plt initially points back to the corresponding 301 // PLT entries with a fixed offset to skip the first instruction. 302 write32le(Buf, Plt + 6); 303 } 304 305 unsigned X86TargetInfo::getDynRel(unsigned Type) const { 306 if (Type == R_386_TLS_LE) 307 return R_386_TLS_TPOFF; 308 if (Type == R_386_TLS_LE_32) 309 return R_386_TLS_TPOFF32; 310 return Type; 311 } 312 313 unsigned X86TargetInfo::getTlsGotRel(unsigned Type) const { 314 if (Type == R_386_TLS_IE) 315 return Type; 316 return TlsGotRel; 317 } 318 319 bool X86TargetInfo::isTlsDynRel(unsigned Type, const SymbolBody &S) const { 320 if (Type == R_386_TLS_LE || Type == R_386_TLS_LE_32 || 321 Type == R_386_TLS_GOTIE) 322 return Config->Shared; 323 if (Type == R_386_TLS_IE) 324 return canBePreempted(&S, true); 325 return Type == R_386_TLS_GD; 326 } 327 328 void X86TargetInfo::writePltZero(uint8_t *Buf) const { 329 // Executable files and shared object files have 330 // separate procedure linkage tables. 331 if (Config->Shared) { 332 const uint8_t V[] = { 333 0xff, 0xb3, 0x04, 0x00, 0x00, 0x00, // pushl 4(%ebx) 334 0xff, 0xa3, 0x08, 0x00, 0x00, 0x00, // jmp *8(%ebx) 335 0x90, 0x90, 0x90, 0x90 // nop; nop; nop; nop 336 }; 337 memcpy(Buf, V, sizeof(V)); 338 return; 339 } 340 341 const uint8_t PltData[] = { 342 0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushl (GOT+4) 343 0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *(GOT+8) 344 0x90, 0x90, 0x90, 0x90 // nop; nop; nop; nop 345 }; 346 memcpy(Buf, PltData, sizeof(PltData)); 347 uint32_t Got = Out<ELF32LE>::GotPlt->getVA(); 348 write32le(Buf + 2, Got + 4); 349 write32le(Buf + 8, Got + 8); 350 } 351 352 void X86TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr, 353 uint64_t PltEntryAddr, int32_t Index, 354 unsigned RelOff) const { 355 const uint8_t Inst[] = { 356 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, // jmp *foo_in_GOT|*foo@GOT(%ebx) 357 0x68, 0x00, 0x00, 0x00, 0x00, // pushl $reloc_offset 358 0xe9, 0x00, 0x00, 0x00, 0x00 // jmp .PLT0@PC 359 }; 360 memcpy(Buf, Inst, sizeof(Inst)); 361 362 // jmp *foo@GOT(%ebx) or jmp *foo_in_GOT 363 Buf[1] = Config->Shared ? 0xa3 : 0x25; 364 uint32_t Got = UseLazyBinding ? Out<ELF32LE>::GotPlt->getVA() 365 : Out<ELF32LE>::Got->getVA(); 366 write32le(Buf + 2, Config->Shared ? GotEntryAddr - Got : GotEntryAddr); 367 write32le(Buf + 7, RelOff); 368 write32le(Buf + 12, -Index * PltEntrySize - PltZeroSize - 16); 369 } 370 371 bool X86TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const { 372 if (Type == R_386_32 || Type == R_386_16 || Type == R_386_8) 373 if (auto *SS = dyn_cast<SharedSymbol<ELF32LE>>(&S)) 374 return SS->Sym.getType() == STT_OBJECT; 375 return false; 376 } 377 378 bool X86TargetInfo::needsGot(uint32_t Type, const SymbolBody &S) const { 379 if (S.isTls() && Type == R_386_TLS_GD) 380 return Target->canRelaxTls(Type, &S) && canBePreempted(&S, true); 381 if (Type == R_386_TLS_GOTIE || Type == R_386_TLS_IE) 382 return !canRelaxTls(Type, &S); 383 return Type == R_386_GOT32 || needsPlt(Type, S); 384 } 385 386 bool X86TargetInfo::needsPlt(uint32_t Type, const SymbolBody &S) const { 387 return isGnuIFunc<ELF32LE>(S) || 388 (Type == R_386_PLT32 && canBePreempted(&S, true)) || 389 (Type == R_386_PC32 && S.isShared()); 390 } 391 392 bool X86TargetInfo::isGotRelative(uint32_t Type) const { 393 // This relocation does not require got entry, 394 // but it is relative to got and needs it to be created. 395 // Here we request for that. 396 return Type == R_386_GOTOFF; 397 } 398 399 void X86TargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, 400 uint64_t P, uint64_t SA, uint64_t ZA, 401 uint8_t *PairedLoc) const { 402 switch (Type) { 403 case R_386_32: 404 add32le(Loc, SA); 405 break; 406 case R_386_GOT32: { 407 uint64_t V = SA - Out<ELF32LE>::Got->getVA() - 408 Out<ELF32LE>::Got->getNumEntries() * 4; 409 checkInt<32>(V, Type); 410 add32le(Loc, V); 411 break; 412 } 413 case R_386_GOTOFF: 414 add32le(Loc, SA - Out<ELF32LE>::Got->getVA()); 415 break; 416 case R_386_GOTPC: 417 add32le(Loc, SA + Out<ELF32LE>::Got->getVA() - P); 418 break; 419 case R_386_PC32: 420 case R_386_PLT32: 421 add32le(Loc, SA - P); 422 break; 423 case R_386_TLS_GD: 424 case R_386_TLS_LDM: 425 case R_386_TLS_TPOFF: { 426 uint64_t V = SA - Out<ELF32LE>::Got->getVA() - 427 Out<ELF32LE>::Got->getNumEntries() * 4; 428 checkInt<32>(V, Type); 429 write32le(Loc, V); 430 break; 431 } 432 case R_386_TLS_IE: 433 case R_386_TLS_LDO_32: 434 write32le(Loc, SA); 435 break; 436 case R_386_TLS_LE: 437 write32le(Loc, SA - Out<ELF32LE>::TlsPhdr->p_memsz); 438 break; 439 case R_386_TLS_LE_32: 440 write32le(Loc, Out<ELF32LE>::TlsPhdr->p_memsz - SA); 441 break; 442 default: 443 fatal("unrecognized reloc " + Twine(Type)); 444 } 445 } 446 447 bool X86TargetInfo::canRelaxTls(unsigned Type, const SymbolBody *S) const { 448 if (Config->Shared || (S && !S->isTls())) 449 return false; 450 return Type == R_386_TLS_LDO_32 || Type == R_386_TLS_LDM || 451 Type == R_386_TLS_GD || 452 (Type == R_386_TLS_IE && !canBePreempted(S, true)) || 453 (Type == R_386_TLS_GOTIE && !canBePreempted(S, true)); 454 } 455 456 bool X86TargetInfo::needsDynRelative(unsigned Type) const { 457 return Config->Shared && Type == R_386_TLS_IE; 458 } 459 460 unsigned X86TargetInfo::relaxTls(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, 461 uint64_t P, uint64_t SA, 462 const SymbolBody *S) const { 463 switch (Type) { 464 case R_386_TLS_GD: 465 if (canBePreempted(S, true)) 466 relocateTlsGdToIe(Loc, BufEnd, P, SA); 467 else 468 relocateTlsGdToLe(Loc, BufEnd, P, SA); 469 // The next relocation should be against __tls_get_addr, so skip it 470 return 1; 471 case R_386_TLS_GOTIE: 472 case R_386_TLS_IE: 473 relocateTlsIeToLe(Type, Loc, BufEnd, P, SA); 474 return 0; 475 case R_386_TLS_LDM: 476 relocateTlsLdToLe(Loc, BufEnd, P, SA); 477 // The next relocation should be against __tls_get_addr, so skip it 478 return 1; 479 case R_386_TLS_LDO_32: 480 relocateOne(Loc, BufEnd, R_386_TLS_LE, P, SA); 481 return 0; 482 } 483 llvm_unreachable("Unknown TLS optimization"); 484 } 485 486 // "Ulrich Drepper, ELF Handling For Thread-Local Storage" (5.1 487 // IA-32 Linker Optimizations, http://www.akkadia.org/drepper/tls.pdf) shows 488 // how GD can be optimized to IE: 489 // leal x@tlsgd(, %ebx, 1), 490 // call __tls_get_addr@plt 491 // Is converted to: 492 // movl %gs:0, %eax 493 // addl x@gotntpoff(%ebx), %eax 494 void X86TargetInfo::relocateTlsGdToIe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P, 495 uint64_t SA) const { 496 const uint8_t Inst[] = { 497 0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax 498 0x03, 0x83, 0x00, 0x00, 0x00, 0x00 // addl 0(%ebx), %eax 499 }; 500 memcpy(Loc - 3, Inst, sizeof(Inst)); 501 relocateOne(Loc + 5, BufEnd, R_386_32, P, 502 SA - Out<ELF32LE>::Got->getVA() - 503 Out<ELF32LE>::Got->getNumEntries() * 4); 504 } 505 506 // GD can be optimized to LE: 507 // leal x@tlsgd(, %ebx, 1), 508 // call __tls_get_addr@plt 509 // Can be converted to: 510 // movl %gs:0,%eax 511 // addl $x@ntpoff,%eax 512 // But gold emits subl $foo@tpoff,%eax instead of addl. 513 // These instructions are completely equal in behavior. 514 // This method generates subl to be consistent with gold. 515 void X86TargetInfo::relocateTlsGdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P, 516 uint64_t SA) const { 517 const uint8_t Inst[] = { 518 0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax 519 0x81, 0xe8, 0x00, 0x00, 0x00, 0x00 // subl 0(%ebx), %eax 520 }; 521 memcpy(Loc - 3, Inst, sizeof(Inst)); 522 relocateOne(Loc + 5, BufEnd, R_386_32, P, 523 Out<ELF32LE>::TlsPhdr->p_memsz - SA); 524 } 525 526 // LD can be optimized to LE: 527 // leal foo(%reg),%eax 528 // call ___tls_get_addr 529 // Is converted to: 530 // movl %gs:0,%eax 531 // nop 532 // leal 0(%esi,1),%esi 533 void X86TargetInfo::relocateTlsLdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P, 534 uint64_t SA) const { 535 const uint8_t Inst[] = { 536 0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0,%eax 537 0x90, // nop 538 0x8d, 0x74, 0x26, 0x00 // leal 0(%esi,1),%esi 539 }; 540 memcpy(Loc - 2, Inst, sizeof(Inst)); 541 } 542 543 // In some conditions, relocations can be optimized to avoid using GOT. 544 // This function does that for Initial Exec to Local Exec case. 545 // Read "ELF Handling For Thread-Local Storage, 5.1 546 // IA-32 Linker Optimizations" (http://www.akkadia.org/drepper/tls.pdf) 547 // by Ulrich Drepper for details. 548 void X86TargetInfo::relocateTlsIeToLe(unsigned Type, uint8_t *Loc, 549 uint8_t *BufEnd, uint64_t P, 550 uint64_t SA) const { 551 // Ulrich's document section 6.2 says that @gotntpoff can 552 // be used with MOVL or ADDL instructions. 553 // @indntpoff is similar to @gotntpoff, but for use in 554 // position dependent code. 555 uint8_t *Inst = Loc - 2; 556 uint8_t *Op = Loc - 1; 557 uint8_t Reg = (Loc[-1] >> 3) & 7; 558 bool IsMov = *Inst == 0x8b; 559 if (Type == R_386_TLS_IE) { 560 // For R_386_TLS_IE relocation we perform the next transformations: 561 // MOVL foo@INDNTPOFF,%EAX is transformed to MOVL $foo,%EAX 562 // MOVL foo@INDNTPOFF,%REG is transformed to MOVL $foo,%REG 563 // ADDL foo@INDNTPOFF,%REG is transformed to ADDL $foo,%REG 564 // First one is special because when EAX is used the sequence is 5 bytes 565 // long, otherwise it is 6 bytes. 566 if (*Op == 0xa1) { 567 *Op = 0xb8; 568 } else { 569 *Inst = IsMov ? 0xc7 : 0x81; 570 *Op = 0xc0 | ((*Op >> 3) & 7); 571 } 572 } else { 573 // R_386_TLS_GOTIE relocation can be optimized to 574 // R_386_TLS_LE so that it does not use GOT. 575 // "MOVL foo@GOTTPOFF(%RIP), %REG" is transformed to "MOVL $foo, %REG". 576 // "ADDL foo@GOTNTPOFF(%RIP), %REG" is transformed to "LEAL foo(%REG), %REG" 577 // Note: gold converts to ADDL instead of LEAL. 578 *Inst = IsMov ? 0xc7 : 0x8d; 579 if (IsMov) 580 *Op = 0xc0 | ((*Op >> 3) & 7); 581 else 582 *Op = 0x80 | Reg | (Reg << 3); 583 } 584 relocateOne(Loc, BufEnd, R_386_TLS_LE, P, SA); 585 } 586 587 X86_64TargetInfo::X86_64TargetInfo() { 588 CopyRel = R_X86_64_COPY; 589 GotRel = R_X86_64_GLOB_DAT; 590 PltRel = R_X86_64_JUMP_SLOT; 591 RelativeRel = R_X86_64_RELATIVE; 592 IRelativeRel = R_X86_64_IRELATIVE; 593 TlsGotRel = R_X86_64_TPOFF64; 594 TlsLocalDynamicRel = R_X86_64_TLSLD; 595 TlsGlobalDynamicRel = R_X86_64_TLSGD; 596 TlsModuleIndexRel = R_X86_64_DTPMOD64; 597 TlsOffsetRel = R_X86_64_DTPOFF64; 598 UseLazyBinding = true; 599 PltEntrySize = 16; 600 PltZeroSize = 16; 601 } 602 603 void X86_64TargetInfo::writeGotPltHeader(uint8_t *Buf) const { 604 write64le(Buf, Out<ELF64LE>::Dynamic->getVA()); 605 } 606 607 void X86_64TargetInfo::writeGotPlt(uint8_t *Buf, uint64_t Plt) const { 608 // See comments in X86TargetInfo::writeGotPlt. 609 write32le(Buf, Plt + 6); 610 } 611 612 void X86_64TargetInfo::writePltZero(uint8_t *Buf) const { 613 const uint8_t PltData[] = { 614 0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushq GOT+8(%rip) 615 0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *GOT+16(%rip) 616 0x0f, 0x1f, 0x40, 0x00 // nopl 0x0(rax) 617 }; 618 memcpy(Buf, PltData, sizeof(PltData)); 619 uint64_t Got = Out<ELF64LE>::GotPlt->getVA(); 620 uint64_t Plt = Out<ELF64LE>::Plt->getVA(); 621 write32le(Buf + 2, Got - Plt + 2); // GOT+8 622 write32le(Buf + 8, Got - Plt + 4); // GOT+16 623 } 624 625 void X86_64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr, 626 uint64_t PltEntryAddr, int32_t Index, 627 unsigned RelOff) const { 628 const uint8_t Inst[] = { 629 0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmpq *got(%rip) 630 0x68, 0x00, 0x00, 0x00, 0x00, // pushq <relocation index> 631 0xe9, 0x00, 0x00, 0x00, 0x00 // jmpq plt[0] 632 }; 633 memcpy(Buf, Inst, sizeof(Inst)); 634 635 write32le(Buf + 2, GotEntryAddr - PltEntryAddr - 6); 636 write32le(Buf + 7, Index); 637 write32le(Buf + 12, -Index * PltEntrySize - PltZeroSize - 16); 638 } 639 640 bool X86_64TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const { 641 if (Type == R_X86_64_32S || Type == R_X86_64_32 || Type == R_X86_64_PC32 || 642 Type == R_X86_64_64) 643 if (auto *SS = dyn_cast<SharedSymbol<ELF64LE>>(&S)) 644 return SS->Sym.getType() == STT_OBJECT; 645 return false; 646 } 647 648 bool X86_64TargetInfo::needsGot(uint32_t Type, const SymbolBody &S) const { 649 if (Type == R_X86_64_TLSGD) 650 return Target->canRelaxTls(Type, &S) && canBePreempted(&S, true); 651 if (Type == R_X86_64_GOTTPOFF) 652 return !canRelaxTls(Type, &S); 653 return Type == R_X86_64_GOTPCREL || needsPlt(Type, S); 654 } 655 656 bool X86_64TargetInfo::isTlsDynRel(unsigned Type, const SymbolBody &S) const { 657 return Type == R_X86_64_GOTTPOFF || Type == R_X86_64_TLSGD; 658 } 659 660 bool X86_64TargetInfo::needsPlt(uint32_t Type, const SymbolBody &S) const { 661 if (needsCopyRel(Type, S)) 662 return false; 663 if (isGnuIFunc<ELF64LE>(S)) 664 return true; 665 666 switch (Type) { 667 default: 668 return false; 669 case R_X86_64_32: 670 case R_X86_64_64: 671 case R_X86_64_PC32: 672 // This relocation is defined to have a value of (S + A - P). 673 // The problems start when a non PIC program calls a function in a shared 674 // library. 675 // In an ideal world, we could just report an error saying the relocation 676 // can overflow at runtime. 677 // In the real world with glibc, crt1.o has a R_X86_64_PC32 pointing to 678 // libc.so. 679 // 680 // The general idea on how to handle such cases is to create a PLT entry 681 // and use that as the function value. 682 // 683 // For the static linking part, we just return true and everything else 684 // will use the the PLT entry as the address. 685 // 686 // The remaining (unimplemented) problem is making sure pointer equality 687 // still works. We need the help of the dynamic linker for that. We 688 // let it know that we have a direct reference to a so symbol by creating 689 // an undefined symbol with a non zero st_value. Seeing that, the 690 // dynamic linker resolves the symbol to the value of the symbol we created. 691 // This is true even for got entries, so pointer equality is maintained. 692 // To avoid an infinite loop, the only entry that points to the 693 // real function is a dedicated got entry used by the plt. That is 694 // identified by special relocation types (R_X86_64_JUMP_SLOT, 695 // R_386_JMP_SLOT, etc). 696 return S.isShared(); 697 case R_X86_64_PLT32: 698 return canBePreempted(&S, true); 699 } 700 } 701 702 bool X86_64TargetInfo::isRelRelative(uint32_t Type) const { 703 switch (Type) { 704 default: 705 return false; 706 case R_X86_64_DTPOFF32: 707 case R_X86_64_DTPOFF64: 708 case R_X86_64_PC8: 709 case R_X86_64_PC16: 710 case R_X86_64_PC32: 711 case R_X86_64_PC64: 712 case R_X86_64_PLT32: 713 return true; 714 } 715 } 716 717 bool X86_64TargetInfo::isSizeRel(uint32_t Type) const { 718 return Type == R_X86_64_SIZE32 || Type == R_X86_64_SIZE64; 719 } 720 721 bool X86_64TargetInfo::canRelaxTls(unsigned Type, const SymbolBody *S) const { 722 if (Config->Shared || (S && !S->isTls())) 723 return false; 724 return Type == R_X86_64_TLSGD || Type == R_X86_64_TLSLD || 725 Type == R_X86_64_DTPOFF32 || 726 (Type == R_X86_64_GOTTPOFF && !canBePreempted(S, true)); 727 } 728 729 // "Ulrich Drepper, ELF Handling For Thread-Local Storage" (5.5 730 // x86-x64 linker optimizations, http://www.akkadia.org/drepper/tls.pdf) shows 731 // how LD can be optimized to LE: 732 // leaq bar@tlsld(%rip), %rdi 733 // callq __tls_get_addr@PLT 734 // leaq bar@dtpoff(%rax), %rcx 735 // Is converted to: 736 // .word 0x6666 737 // .byte 0x66 738 // mov %fs:0,%rax 739 // leaq bar@tpoff(%rax), %rcx 740 void X86_64TargetInfo::relocateTlsLdToLe(uint8_t *Loc, uint8_t *BufEnd, 741 uint64_t P, uint64_t SA) const { 742 const uint8_t Inst[] = { 743 0x66, 0x66, //.word 0x6666 744 0x66, //.byte 0x66 745 0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax 746 }; 747 memcpy(Loc - 3, Inst, sizeof(Inst)); 748 } 749 750 // "Ulrich Drepper, ELF Handling For Thread-Local Storage" (5.5 751 // x86-x64 linker optimizations, http://www.akkadia.org/drepper/tls.pdf) shows 752 // how GD can be optimized to LE: 753 // .byte 0x66 754 // leaq x@tlsgd(%rip), %rdi 755 // .word 0x6666 756 // rex64 757 // call __tls_get_addr@plt 758 // Is converted to: 759 // mov %fs:0x0,%rax 760 // lea x@tpoff,%rax 761 void X86_64TargetInfo::relocateTlsGdToLe(uint8_t *Loc, uint8_t *BufEnd, 762 uint64_t P, uint64_t SA) const { 763 const uint8_t Inst[] = { 764 0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax 765 0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00 // lea x@tpoff,%rax 766 }; 767 memcpy(Loc - 4, Inst, sizeof(Inst)); 768 relocateOne(Loc + 8, BufEnd, R_X86_64_TPOFF32, P, SA); 769 } 770 771 // "Ulrich Drepper, ELF Handling For Thread-Local Storage" (5.5 772 // x86-x64 linker optimizations, http://www.akkadia.org/drepper/tls.pdf) shows 773 // how GD can be optimized to IE: 774 // .byte 0x66 775 // leaq x@tlsgd(%rip), %rdi 776 // .word 0x6666 777 // rex64 778 // call __tls_get_addr@plt 779 // Is converted to: 780 // mov %fs:0x0,%rax 781 // addq x@tpoff,%rax 782 void X86_64TargetInfo::relocateTlsGdToIe(uint8_t *Loc, uint8_t *BufEnd, 783 uint64_t P, uint64_t SA) const { 784 const uint8_t Inst[] = { 785 0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax 786 0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00 // addq x@tpoff,%rax 787 }; 788 memcpy(Loc - 4, Inst, sizeof(Inst)); 789 relocateOne(Loc + 8, BufEnd, R_X86_64_TPOFF64, P + 12, SA); 790 } 791 792 // In some conditions, R_X86_64_GOTTPOFF relocation can be optimized to 793 // R_X86_64_TPOFF32 so that it does not use GOT. 794 // This function does that. Read "ELF Handling For Thread-Local Storage, 795 // 5.5 x86-x64 linker optimizations" (http://www.akkadia.org/drepper/tls.pdf) 796 // by Ulrich Drepper for details. 797 void X86_64TargetInfo::relocateTlsIeToLe(uint8_t *Loc, uint8_t *BufEnd, 798 uint64_t P, uint64_t SA) const { 799 // Ulrich's document section 6.5 says that @gottpoff(%rip) must be 800 // used in MOVQ or ADDQ instructions only. 801 // "MOVQ foo@GOTTPOFF(%RIP), %REG" is transformed to "MOVQ $foo, %REG". 802 // "ADDQ foo@GOTTPOFF(%RIP), %REG" is transformed to "LEAQ foo(%REG), %REG" 803 // (if the register is not RSP/R12) or "ADDQ $foo, %RSP". 804 // Opcodes info can be found at http://ref.x86asm.net/coder64.html#x48. 805 uint8_t *Prefix = Loc - 3; 806 uint8_t *Inst = Loc - 2; 807 uint8_t *RegSlot = Loc - 1; 808 uint8_t Reg = Loc[-1] >> 3; 809 bool IsMov = *Inst == 0x8b; 810 bool RspAdd = !IsMov && Reg == 4; 811 // r12 and rsp registers requires special handling. 812 // Problem is that for other registers, for example leaq 0xXXXXXXXX(%r11),%r11 813 // result out is 7 bytes: 4d 8d 9b XX XX XX XX, 814 // but leaq 0xXXXXXXXX(%r12),%r12 is 8 bytes: 4d 8d a4 24 XX XX XX XX. 815 // The same true for rsp. So we convert to addq for them, saving 1 byte that 816 // we dont have. 817 if (RspAdd) 818 *Inst = 0x81; 819 else 820 *Inst = IsMov ? 0xc7 : 0x8d; 821 if (*Prefix == 0x4c) 822 *Prefix = (IsMov || RspAdd) ? 0x49 : 0x4d; 823 *RegSlot = (IsMov || RspAdd) ? (0xc0 | Reg) : (0x80 | Reg | (Reg << 3)); 824 relocateOne(Loc, BufEnd, R_X86_64_TPOFF32, P, SA); 825 } 826 827 // This function applies a TLS relocation with an optimization as described 828 // in the Ulrich's document. As a result of rewriting instructions at the 829 // relocation target, relocations immediately follow the TLS relocation (which 830 // would be applied to rewritten instructions) may have to be skipped. 831 // This function returns a number of relocations that need to be skipped. 832 unsigned X86_64TargetInfo::relaxTls(uint8_t *Loc, uint8_t *BufEnd, 833 uint32_t Type, uint64_t P, uint64_t SA, 834 const SymbolBody *S) const { 835 switch (Type) { 836 case R_X86_64_DTPOFF32: 837 relocateOne(Loc, BufEnd, R_X86_64_TPOFF32, P, SA); 838 return 0; 839 case R_X86_64_GOTTPOFF: 840 relocateTlsIeToLe(Loc, BufEnd, P, SA); 841 return 0; 842 case R_X86_64_TLSGD: { 843 if (canBePreempted(S, true)) 844 relocateTlsGdToIe(Loc, BufEnd, P, SA); 845 else 846 relocateTlsGdToLe(Loc, BufEnd, P, SA); 847 // The next relocation should be against __tls_get_addr, so skip it 848 return 1; 849 } 850 case R_X86_64_TLSLD: 851 relocateTlsLdToLe(Loc, BufEnd, P, SA); 852 // The next relocation should be against __tls_get_addr, so skip it 853 return 1; 854 } 855 llvm_unreachable("Unknown TLS optimization"); 856 } 857 858 void X86_64TargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, 859 uint64_t P, uint64_t SA, uint64_t ZA, 860 uint8_t *PairedLoc) const { 861 switch (Type) { 862 case R_X86_64_32: 863 checkUInt<32>(SA, Type); 864 write32le(Loc, SA); 865 break; 866 case R_X86_64_32S: 867 checkInt<32>(SA, Type); 868 write32le(Loc, SA); 869 break; 870 case R_X86_64_64: 871 write64le(Loc, SA); 872 break; 873 case R_X86_64_DTPOFF32: 874 write32le(Loc, SA); 875 break; 876 case R_X86_64_DTPOFF64: 877 write64le(Loc, SA); 878 break; 879 case R_X86_64_GOTPCREL: 880 case R_X86_64_PC32: 881 case R_X86_64_PLT32: 882 case R_X86_64_TLSGD: 883 case R_X86_64_TLSLD: 884 write32le(Loc, SA - P); 885 break; 886 case R_X86_64_SIZE32: 887 write32le(Loc, ZA); 888 break; 889 case R_X86_64_SIZE64: 890 write64le(Loc, ZA); 891 break; 892 case R_X86_64_TPOFF32: { 893 uint64_t Val = SA - Out<ELF64LE>::TlsPhdr->p_memsz; 894 checkInt<32>(Val, Type); 895 write32le(Loc, Val); 896 break; 897 } 898 case R_X86_64_TPOFF64: 899 write32le(Loc, SA - P); 900 break; 901 default: 902 fatal("unrecognized reloc " + Twine(Type)); 903 } 904 } 905 906 // Relocation masks following the #lo(value), #hi(value), #ha(value), 907 // #higher(value), #highera(value), #highest(value), and #highesta(value) 908 // macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi 909 // document. 910 static uint16_t applyPPCLo(uint64_t V) { return V; } 911 static uint16_t applyPPCHi(uint64_t V) { return V >> 16; } 912 static uint16_t applyPPCHa(uint64_t V) { return (V + 0x8000) >> 16; } 913 static uint16_t applyPPCHigher(uint64_t V) { return V >> 32; } 914 static uint16_t applyPPCHighera(uint64_t V) { return (V + 0x8000) >> 32; } 915 static uint16_t applyPPCHighest(uint64_t V) { return V >> 48; } 916 static uint16_t applyPPCHighesta(uint64_t V) { return (V + 0x8000) >> 48; } 917 918 PPCTargetInfo::PPCTargetInfo() {} 919 bool PPCTargetInfo::isRelRelative(uint32_t Type) const { return false; } 920 921 void PPCTargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, 922 uint64_t P, uint64_t SA, uint64_t ZA, 923 uint8_t *PairedLoc) const { 924 switch (Type) { 925 case R_PPC_ADDR16_HA: 926 write16be(Loc, applyPPCHa(SA)); 927 break; 928 case R_PPC_ADDR16_LO: 929 write16be(Loc, applyPPCLo(SA)); 930 break; 931 default: 932 fatal("unrecognized reloc " + Twine(Type)); 933 } 934 } 935 936 PPC64TargetInfo::PPC64TargetInfo() { 937 GotRel = R_PPC64_GLOB_DAT; 938 RelativeRel = R_PPC64_RELATIVE; 939 PltEntrySize = 32; 940 941 // We need 64K pages (at least under glibc/Linux, the loader won't 942 // set different permissions on a finer granularity than that). 943 PageSize = 65536; 944 945 // The PPC64 ELF ABI v1 spec, says: 946 // 947 // It is normally desirable to put segments with different characteristics 948 // in separate 256 Mbyte portions of the address space, to give the 949 // operating system full paging flexibility in the 64-bit address space. 950 // 951 // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers 952 // use 0x10000000 as the starting address. 953 VAStart = 0x10000000; 954 } 955 956 uint64_t getPPC64TocBase() { 957 // The TOC consists of sections .got, .toc, .tocbss, .plt in that 958 // order. The TOC starts where the first of these sections starts. 959 960 // FIXME: This obviously does not do the right thing when there is no .got 961 // section, but there is a .toc or .tocbss section. 962 uint64_t TocVA = Out<ELF64BE>::Got->getVA(); 963 if (!TocVA) 964 TocVA = Out<ELF64BE>::Plt->getVA(); 965 966 // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000 967 // thus permitting a full 64 Kbytes segment. Note that the glibc startup 968 // code (crt1.o) assumes that you can get from the TOC base to the 969 // start of the .toc section with only a single (signed) 16-bit relocation. 970 return TocVA + 0x8000; 971 } 972 973 void PPC64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr, 974 uint64_t PltEntryAddr, int32_t Index, 975 unsigned RelOff) const { 976 uint64_t Off = GotEntryAddr - getPPC64TocBase(); 977 978 // FIXME: What we should do, in theory, is get the offset of the function 979 // descriptor in the .opd section, and use that as the offset from %r2 (the 980 // TOC-base pointer). Instead, we have the GOT-entry offset, and that will 981 // be a pointer to the function descriptor in the .opd section. Using 982 // this scheme is simpler, but requires an extra indirection per PLT dispatch. 983 984 write32be(Buf, 0xf8410028); // std %r2, 40(%r1) 985 write32be(Buf + 4, 0x3d620000 | applyPPCHa(Off)); // addis %r11, %r2, X@ha 986 write32be(Buf + 8, 0xe98b0000 | applyPPCLo(Off)); // ld %r12, X@l(%r11) 987 write32be(Buf + 12, 0xe96c0000); // ld %r11,0(%r12) 988 write32be(Buf + 16, 0x7d6903a6); // mtctr %r11 989 write32be(Buf + 20, 0xe84c0008); // ld %r2,8(%r12) 990 write32be(Buf + 24, 0xe96c0010); // ld %r11,16(%r12) 991 write32be(Buf + 28, 0x4e800420); // bctr 992 } 993 994 bool PPC64TargetInfo::needsGot(uint32_t Type, const SymbolBody &S) const { 995 if (needsPlt(Type, S)) 996 return true; 997 998 switch (Type) { 999 default: return false; 1000 case R_PPC64_GOT16: 1001 case R_PPC64_GOT16_DS: 1002 case R_PPC64_GOT16_HA: 1003 case R_PPC64_GOT16_HI: 1004 case R_PPC64_GOT16_LO: 1005 case R_PPC64_GOT16_LO_DS: 1006 return true; 1007 } 1008 } 1009 1010 bool PPC64TargetInfo::needsPlt(uint32_t Type, const SymbolBody &S) const { 1011 // These are function calls that need to be redirected through a PLT stub. 1012 return Type == R_PPC64_REL24 && canBePreempted(&S, false); 1013 } 1014 1015 bool PPC64TargetInfo::isRelRelative(uint32_t Type) const { 1016 switch (Type) { 1017 default: 1018 return true; 1019 case R_PPC64_ADDR64: 1020 case R_PPC64_TOC: 1021 return false; 1022 } 1023 } 1024 1025 void PPC64TargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, 1026 uint64_t P, uint64_t SA, uint64_t ZA, 1027 uint8_t *PairedLoc) const { 1028 uint64_t TB = getPPC64TocBase(); 1029 1030 // For a TOC-relative relocation, adjust the addend and proceed in terms of 1031 // the corresponding ADDR16 relocation type. 1032 switch (Type) { 1033 case R_PPC64_TOC16: Type = R_PPC64_ADDR16; SA -= TB; break; 1034 case R_PPC64_TOC16_DS: Type = R_PPC64_ADDR16_DS; SA -= TB; break; 1035 case R_PPC64_TOC16_HA: Type = R_PPC64_ADDR16_HA; SA -= TB; break; 1036 case R_PPC64_TOC16_HI: Type = R_PPC64_ADDR16_HI; SA -= TB; break; 1037 case R_PPC64_TOC16_LO: Type = R_PPC64_ADDR16_LO; SA -= TB; break; 1038 case R_PPC64_TOC16_LO_DS: Type = R_PPC64_ADDR16_LO_DS; SA -= TB; break; 1039 default: break; 1040 } 1041 1042 switch (Type) { 1043 case R_PPC64_ADDR14: { 1044 checkAlignment<4>(SA, Type); 1045 // Preserve the AA/LK bits in the branch instruction 1046 uint8_t AALK = Loc[3]; 1047 write16be(Loc + 2, (AALK & 3) | (SA & 0xfffc)); 1048 break; 1049 } 1050 case R_PPC64_ADDR16: 1051 checkInt<16>(SA, Type); 1052 write16be(Loc, SA); 1053 break; 1054 case R_PPC64_ADDR16_DS: 1055 checkInt<16>(SA, Type); 1056 write16be(Loc, (read16be(Loc) & 3) | (SA & ~3)); 1057 break; 1058 case R_PPC64_ADDR16_HA: 1059 write16be(Loc, applyPPCHa(SA)); 1060 break; 1061 case R_PPC64_ADDR16_HI: 1062 write16be(Loc, applyPPCHi(SA)); 1063 break; 1064 case R_PPC64_ADDR16_HIGHER: 1065 write16be(Loc, applyPPCHigher(SA)); 1066 break; 1067 case R_PPC64_ADDR16_HIGHERA: 1068 write16be(Loc, applyPPCHighera(SA)); 1069 break; 1070 case R_PPC64_ADDR16_HIGHEST: 1071 write16be(Loc, applyPPCHighest(SA)); 1072 break; 1073 case R_PPC64_ADDR16_HIGHESTA: 1074 write16be(Loc, applyPPCHighesta(SA)); 1075 break; 1076 case R_PPC64_ADDR16_LO: 1077 write16be(Loc, applyPPCLo(SA)); 1078 break; 1079 case R_PPC64_ADDR16_LO_DS: 1080 write16be(Loc, (read16be(Loc) & 3) | (applyPPCLo(SA) & ~3)); 1081 break; 1082 case R_PPC64_ADDR32: 1083 checkInt<32>(SA, Type); 1084 write32be(Loc, SA); 1085 break; 1086 case R_PPC64_ADDR64: 1087 write64be(Loc, SA); 1088 break; 1089 case R_PPC64_REL16_HA: 1090 write16be(Loc, applyPPCHa(SA - P)); 1091 break; 1092 case R_PPC64_REL16_HI: 1093 write16be(Loc, applyPPCHi(SA - P)); 1094 break; 1095 case R_PPC64_REL16_LO: 1096 write16be(Loc, applyPPCLo(SA - P)); 1097 break; 1098 case R_PPC64_REL24: { 1099 // If we have an undefined weak symbol, we might get here with a symbol 1100 // address of zero. That could overflow, but the code must be unreachable, 1101 // so don't bother doing anything at all. 1102 if (!SA) 1103 break; 1104 1105 uint64_t PltStart = Out<ELF64BE>::Plt->getVA(); 1106 uint64_t PltEnd = PltStart + Out<ELF64BE>::Plt->getSize(); 1107 bool InPlt = PltStart <= SA && SA < PltEnd; 1108 1109 if (!InPlt && Out<ELF64BE>::Opd) { 1110 // If this is a local call, and we currently have the address of a 1111 // function-descriptor, get the underlying code address instead. 1112 uint64_t OpdStart = Out<ELF64BE>::Opd->getVA(); 1113 uint64_t OpdEnd = OpdStart + Out<ELF64BE>::Opd->getSize(); 1114 bool InOpd = OpdStart <= SA && SA < OpdEnd; 1115 1116 if (InOpd) 1117 SA = read64be(&Out<ELF64BE>::OpdBuf[SA - OpdStart]); 1118 } 1119 1120 uint32_t Mask = 0x03FFFFFC; 1121 checkInt<24>(SA - P, Type); 1122 write32be(Loc, (read32be(Loc) & ~Mask) | ((SA - P) & Mask)); 1123 1124 uint32_t Nop = 0x60000000; 1125 if (InPlt && Loc + 8 <= BufEnd && read32be(Loc + 4) == Nop) 1126 write32be(Loc + 4, 0xe8410028); // ld %r2, 40(%r1) 1127 break; 1128 } 1129 case R_PPC64_REL32: 1130 checkInt<32>(SA - P, Type); 1131 write32be(Loc, SA - P); 1132 break; 1133 case R_PPC64_REL64: 1134 write64be(Loc, SA - P); 1135 break; 1136 case R_PPC64_TOC: 1137 write64be(Loc, SA); 1138 break; 1139 default: 1140 fatal("unrecognized reloc " + Twine(Type)); 1141 } 1142 } 1143 1144 AArch64TargetInfo::AArch64TargetInfo() { 1145 CopyRel = R_AARCH64_COPY; 1146 IRelativeRel = R_AARCH64_IRELATIVE; 1147 GotRel = R_AARCH64_GLOB_DAT; 1148 PltRel = R_AARCH64_JUMP_SLOT; 1149 TlsGotRel = R_AARCH64_TLS_TPREL64; 1150 UseLazyBinding = true; 1151 PltEntrySize = 16; 1152 PltZeroSize = 32; 1153 } 1154 1155 unsigned AArch64TargetInfo::getDynRel(unsigned Type) const { 1156 if (Type == R_AARCH64_ABS32 || Type == R_AARCH64_ABS64) 1157 return Type; 1158 StringRef S = getELFRelocationTypeName(EM_AARCH64, Type); 1159 fatal("Relocation " + S + " cannot be used when making a shared object; " 1160 "recompile with -fPIC."); 1161 } 1162 1163 void AArch64TargetInfo::writeGotPlt(uint8_t *Buf, uint64_t Plt) const { 1164 write64le(Buf, Out<ELF64LE>::Plt->getVA()); 1165 } 1166 1167 void AArch64TargetInfo::writePltZero(uint8_t *Buf) const { 1168 const uint8_t PltData[] = { 1169 0xf0, 0x7b, 0xbf, 0xa9, // stp x16, x30, [sp,#-16]! 1170 0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[2])) 1171 0x11, 0x02, 0x40, 0xf9, // ldr x17, [x16, Offset(&(.plt.got[2]))] 1172 0x10, 0x02, 0x00, 0x91, // add x16, x16, Offset(&(.plt.got[2])) 1173 0x20, 0x02, 0x1f, 0xd6, // br x17 1174 0x1f, 0x20, 0x03, 0xd5, // nop 1175 0x1f, 0x20, 0x03, 0xd5, // nop 1176 0x1f, 0x20, 0x03, 0xd5 // nop 1177 }; 1178 memcpy(Buf, PltData, sizeof(PltData)); 1179 1180 uint64_t Got = Out<ELF64LE>::GotPlt->getVA(); 1181 uint64_t Plt = Out<ELF64LE>::Plt->getVA(); 1182 relocateOne(Buf + 4, Buf + 8, R_AARCH64_ADR_PREL_PG_HI21, Plt + 4, Got + 16); 1183 relocateOne(Buf + 8, Buf + 12, R_AARCH64_LDST64_ABS_LO12_NC, Plt + 8, 1184 Got + 16); 1185 relocateOne(Buf + 12, Buf + 16, R_AARCH64_ADD_ABS_LO12_NC, Plt + 12, 1186 Got + 16); 1187 } 1188 1189 void AArch64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr, 1190 uint64_t PltEntryAddr, int32_t Index, 1191 unsigned RelOff) const { 1192 const uint8_t Inst[] = { 1193 0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[n])) 1194 0x11, 0x02, 0x40, 0xf9, // ldr x17, [x16, Offset(&(.plt.got[n]))] 1195 0x10, 0x02, 0x00, 0x91, // add x16, x16, Offset(&(.plt.got[n])) 1196 0x20, 0x02, 0x1f, 0xd6 // br x17 1197 }; 1198 memcpy(Buf, Inst, sizeof(Inst)); 1199 1200 relocateOne(Buf, Buf + 4, R_AARCH64_ADR_PREL_PG_HI21, PltEntryAddr, 1201 GotEntryAddr); 1202 relocateOne(Buf + 4, Buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, PltEntryAddr + 4, 1203 GotEntryAddr); 1204 relocateOne(Buf + 8, Buf + 12, R_AARCH64_ADD_ABS_LO12_NC, PltEntryAddr + 8, 1205 GotEntryAddr); 1206 } 1207 1208 unsigned AArch64TargetInfo::getTlsGotRel(unsigned Type) const { 1209 if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 || 1210 Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) 1211 return Type; 1212 return TlsGotRel; 1213 } 1214 1215 bool AArch64TargetInfo::isTlsDynRel(unsigned Type, const SymbolBody &S) const { 1216 return Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 || 1217 Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC; 1218 } 1219 1220 bool AArch64TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const { 1221 if (Config->Shared) 1222 return false; 1223 switch (Type) { 1224 default: 1225 return false; 1226 case R_AARCH64_ABS16: 1227 case R_AARCH64_ABS32: 1228 case R_AARCH64_ABS64: 1229 case R_AARCH64_ADD_ABS_LO12_NC: 1230 case R_AARCH64_ADR_PREL_LO21: 1231 case R_AARCH64_ADR_PREL_PG_HI21: 1232 case R_AARCH64_LDST8_ABS_LO12_NC: 1233 case R_AARCH64_LDST16_ABS_LO12_NC: 1234 case R_AARCH64_LDST32_ABS_LO12_NC: 1235 case R_AARCH64_LDST64_ABS_LO12_NC: 1236 case R_AARCH64_LDST128_ABS_LO12_NC: 1237 if (auto *SS = dyn_cast<SharedSymbol<ELF64LE>>(&S)) 1238 return SS->Sym.getType() == STT_OBJECT; 1239 return false; 1240 } 1241 } 1242 1243 bool AArch64TargetInfo::needsGot(uint32_t Type, const SymbolBody &S) const { 1244 switch (Type) { 1245 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 1246 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 1247 case R_AARCH64_ADR_GOT_PAGE: 1248 case R_AARCH64_LD64_GOT_LO12_NC: 1249 return true; 1250 default: 1251 return needsPlt(Type, S); 1252 } 1253 } 1254 1255 bool AArch64TargetInfo::needsPlt(uint32_t Type, const SymbolBody &S) const { 1256 if (isGnuIFunc<ELF64LE>(S)) 1257 return true; 1258 switch (Type) { 1259 default: 1260 return false; 1261 case R_AARCH64_CALL26: 1262 case R_AARCH64_CONDBR19: 1263 case R_AARCH64_JUMP26: 1264 case R_AARCH64_TSTBR14: 1265 return canBePreempted(&S, true); 1266 } 1267 } 1268 1269 static void updateAArch64Adr(uint8_t *L, uint64_t Imm) { 1270 uint32_t ImmLo = (Imm & 0x3) << 29; 1271 uint32_t ImmHi = ((Imm & 0x1FFFFC) >> 2) << 5; 1272 uint64_t Mask = (0x3 << 29) | (0x7FFFF << 5); 1273 write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi); 1274 } 1275 1276 // Page(Expr) is the page address of the expression Expr, defined 1277 // as (Expr & ~0xFFF). (This applies even if the machine page size 1278 // supported by the platform has a different value.) 1279 static uint64_t getAArch64Page(uint64_t Expr) { 1280 return Expr & (~static_cast<uint64_t>(0xFFF)); 1281 } 1282 1283 void AArch64TargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, 1284 uint32_t Type, uint64_t P, uint64_t SA, 1285 uint64_t ZA, uint8_t *PairedLoc) const { 1286 switch (Type) { 1287 case R_AARCH64_ABS16: 1288 checkIntUInt<16>(SA, Type); 1289 write16le(Loc, SA); 1290 break; 1291 case R_AARCH64_ABS32: 1292 checkIntUInt<32>(SA, Type); 1293 write32le(Loc, SA); 1294 break; 1295 case R_AARCH64_ABS64: 1296 write64le(Loc, SA); 1297 break; 1298 case R_AARCH64_ADD_ABS_LO12_NC: 1299 // This relocation stores 12 bits and there's no instruction 1300 // to do it. Instead, we do a 32 bits store of the value 1301 // of r_addend bitwise-or'ed Loc. This assumes that the addend 1302 // bits in Loc are zero. 1303 or32le(Loc, (SA & 0xFFF) << 10); 1304 break; 1305 case R_AARCH64_ADR_GOT_PAGE: { 1306 uint64_t X = getAArch64Page(SA) - getAArch64Page(P); 1307 checkInt<33>(X, Type); 1308 updateAArch64Adr(Loc, (X >> 12) & 0x1FFFFF); // X[32:12] 1309 break; 1310 } 1311 case R_AARCH64_ADR_PREL_LO21: { 1312 uint64_t X = SA - P; 1313 checkInt<21>(X, Type); 1314 updateAArch64Adr(Loc, X & 0x1FFFFF); 1315 break; 1316 } 1317 case R_AARCH64_ADR_PREL_PG_HI21: 1318 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: { 1319 uint64_t X = getAArch64Page(SA) - getAArch64Page(P); 1320 checkInt<33>(X, Type); 1321 updateAArch64Adr(Loc, (X >> 12) & 0x1FFFFF); // X[32:12] 1322 break; 1323 } 1324 case R_AARCH64_CALL26: 1325 case R_AARCH64_JUMP26: { 1326 uint64_t X = SA - P; 1327 checkInt<28>(X, Type); 1328 or32le(Loc, (X & 0x0FFFFFFC) >> 2); 1329 break; 1330 } 1331 case R_AARCH64_CONDBR19: { 1332 uint64_t X = SA - P; 1333 checkInt<21>(X, Type); 1334 or32le(Loc, (X & 0x1FFFFC) << 3); 1335 break; 1336 } 1337 case R_AARCH64_LD64_GOT_LO12_NC: 1338 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 1339 checkAlignment<8>(SA, Type); 1340 or32le(Loc, (SA & 0xFF8) << 7); 1341 break; 1342 case R_AARCH64_LDST128_ABS_LO12_NC: 1343 or32le(Loc, (SA & 0x0FF8) << 6); 1344 break; 1345 case R_AARCH64_LDST16_ABS_LO12_NC: 1346 or32le(Loc, (SA & 0x0FFC) << 9); 1347 break; 1348 case R_AARCH64_LDST8_ABS_LO12_NC: 1349 or32le(Loc, (SA & 0xFFF) << 10); 1350 break; 1351 case R_AARCH64_LDST32_ABS_LO12_NC: 1352 or32le(Loc, (SA & 0xFFC) << 8); 1353 break; 1354 case R_AARCH64_LDST64_ABS_LO12_NC: 1355 or32le(Loc, (SA & 0xFF8) << 7); 1356 break; 1357 case R_AARCH64_PREL16: 1358 checkIntUInt<16>(SA - P, Type); 1359 write16le(Loc, SA - P); 1360 break; 1361 case R_AARCH64_PREL32: 1362 checkIntUInt<32>(SA - P, Type); 1363 write32le(Loc, SA - P); 1364 break; 1365 case R_AARCH64_PREL64: 1366 write64le(Loc, SA - P); 1367 break; 1368 case R_AARCH64_TSTBR14: { 1369 uint64_t X = SA - P; 1370 checkInt<16>(X, Type); 1371 or32le(Loc, (X & 0xFFFC) << 3); 1372 break; 1373 } 1374 default: 1375 fatal("unrecognized reloc " + Twine(Type)); 1376 } 1377 } 1378 1379 // Implementing relocations for AMDGPU is low priority since most 1380 // programs don't use relocations now. Thus, this function is not 1381 // actually called (relocateOne is called for each relocation). 1382 // That's why the AMDGPU port works without implementing this function. 1383 void AMDGPUTargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, 1384 uint64_t P, uint64_t SA, uint64_t ZA, 1385 uint8_t *PairedLoc) const { 1386 llvm_unreachable("not implemented"); 1387 } 1388 1389 template <class ELFT> MipsTargetInfo<ELFT>::MipsTargetInfo() { 1390 PageSize = 65536; 1391 GotHeaderEntriesNum = 2; 1392 RelativeRel = R_MIPS_REL32; 1393 } 1394 1395 template <class ELFT> 1396 unsigned MipsTargetInfo<ELFT>::getDynRel(unsigned Type) const { 1397 if (Type == R_MIPS_32 || Type == R_MIPS_64) 1398 return R_MIPS_REL32; 1399 StringRef S = getELFRelocationTypeName(EM_MIPS, Type); 1400 fatal("Relocation " + S + " cannot be used when making a shared object; " 1401 "recompile with -fPIC."); 1402 } 1403 1404 template <class ELFT> 1405 void MipsTargetInfo<ELFT>::writeGotHeader(uint8_t *Buf) const { 1406 typedef typename ELFFile<ELFT>::Elf_Off Elf_Off; 1407 typedef typename ELFFile<ELFT>::uintX_t uintX_t; 1408 1409 // Set the MSB of the second GOT slot. This is not required by any 1410 // MIPS ABI documentation, though. 1411 // 1412 // There is a comment in glibc saying that "The MSB of got[1] of a 1413 // gnu object is set to identify gnu objects," and in GNU gold it 1414 // says "the second entry will be used by some runtime loaders". 1415 // But how this field is being used is unclear. 1416 // 1417 // We are not really willing to mimic other linkers behaviors 1418 // without understanding why they do that, but because all files 1419 // generated by GNU tools have this special GOT value, and because 1420 // we've been doing this for years, it is probably a safe bet to 1421 // keep doing this for now. We really need to revisit this to see 1422 // if we had to do this. 1423 auto *P = reinterpret_cast<Elf_Off *>(Buf); 1424 P[1] = uintX_t(1) << (ELFT::Is64Bits ? 63 : 31); 1425 } 1426 1427 template <class ELFT> 1428 bool MipsTargetInfo<ELFT>::needsGot(uint32_t Type, const SymbolBody &S) const { 1429 return Type == R_MIPS_GOT16 || Type == R_MIPS_CALL16; 1430 } 1431 1432 template <class ELFT> 1433 bool MipsTargetInfo<ELFT>::needsPlt(uint32_t Type, const SymbolBody &S) const { 1434 return false; 1435 } 1436 1437 static uint16_t mipsHigh(uint64_t V) { return (V + 0x8000) >> 16; } 1438 1439 template <endianness E, uint8_t BSIZE> 1440 static void applyMipsPcReloc(uint8_t *Loc, uint32_t Type, uint64_t P, 1441 uint64_t SA) { 1442 uint32_t Mask = ~(0xffffffff << BSIZE); 1443 uint32_t Instr = read32<E>(Loc); 1444 int64_t A = SignExtend64<BSIZE + 2>((Instr & Mask) << 2); 1445 checkAlignment<4>(SA + A, Type); 1446 int64_t V = SA + A - P; 1447 checkInt<BSIZE + 2>(V, Type); 1448 write32<E>(Loc, (Instr & ~Mask) | ((V >> 2) & Mask)); 1449 } 1450 1451 template <class ELFT> 1452 void MipsTargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint8_t *BufEnd, 1453 uint32_t Type, uint64_t P, uint64_t SA, 1454 uint64_t ZA, uint8_t *PairedLoc) const { 1455 const endianness E = ELFT::TargetEndianness; 1456 switch (Type) { 1457 case R_MIPS_32: 1458 add32<E>(Loc, SA); 1459 break; 1460 case R_MIPS_CALL16: 1461 case R_MIPS_GOT16: { 1462 int64_t V = SA - getMipsGpAddr<ELFT>(); 1463 if (Type == R_MIPS_GOT16) 1464 checkInt<16>(V, Type); 1465 write32<E>(Loc, (read32<E>(Loc) & 0xffff0000) | (V & 0xffff)); 1466 break; 1467 } 1468 case R_MIPS_GPREL16: { 1469 uint32_t Instr = read32<E>(Loc); 1470 int64_t V = SA + SignExtend64<16>(Instr & 0xffff) - getMipsGpAddr<ELFT>(); 1471 checkInt<16>(V, Type); 1472 write32<E>(Loc, (Instr & 0xffff0000) | (V & 0xffff)); 1473 break; 1474 } 1475 case R_MIPS_GPREL32: 1476 write32<E>(Loc, SA + int32_t(read32<E>(Loc)) - getMipsGpAddr<ELFT>()); 1477 break; 1478 case R_MIPS_HI16: { 1479 uint32_t Instr = read32<E>(Loc); 1480 if (PairedLoc) { 1481 uint64_t AHL = ((Instr & 0xffff) << 16) + 1482 SignExtend64<16>(read32<E>(PairedLoc) & 0xffff); 1483 write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(SA + AHL)); 1484 } else { 1485 warning("Can't find matching R_MIPS_LO16 relocation for R_MIPS_HI16"); 1486 write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(SA)); 1487 } 1488 break; 1489 } 1490 case R_MIPS_JALR: 1491 // Ignore this optimization relocation for now 1492 break; 1493 case R_MIPS_LO16: { 1494 uint32_t Instr = read32<E>(Loc); 1495 int64_t AHL = SignExtend64<16>(Instr & 0xffff); 1496 write32<E>(Loc, (Instr & 0xffff0000) | ((SA + AHL) & 0xffff)); 1497 break; 1498 } 1499 case R_MIPS_PC16: 1500 applyMipsPcReloc<E, 16>(Loc, Type, P, SA); 1501 break; 1502 case R_MIPS_PC19_S2: 1503 applyMipsPcReloc<E, 19>(Loc, Type, P, SA); 1504 break; 1505 case R_MIPS_PC21_S2: 1506 applyMipsPcReloc<E, 21>(Loc, Type, P, SA); 1507 break; 1508 case R_MIPS_PC26_S2: 1509 applyMipsPcReloc<E, 26>(Loc, Type, P, SA); 1510 break; 1511 case R_MIPS_PCHI16: { 1512 uint32_t Instr = read32<E>(Loc); 1513 if (PairedLoc) { 1514 uint64_t AHL = ((Instr & 0xffff) << 16) + 1515 SignExtend64<16>(read32<E>(PairedLoc) & 0xffff); 1516 write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(SA + AHL - P)); 1517 } else { 1518 warning("Can't find matching R_MIPS_PCLO16 relocation for R_MIPS_PCHI16"); 1519 write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(SA - P)); 1520 } 1521 break; 1522 } 1523 case R_MIPS_PCLO16: { 1524 uint32_t Instr = read32<E>(Loc); 1525 int64_t AHL = SignExtend64<16>(Instr & 0xffff); 1526 write32<E>(Loc, (Instr & 0xffff0000) | ((SA + AHL - P) & 0xffff)); 1527 break; 1528 } 1529 default: 1530 fatal("unrecognized reloc " + Twine(Type)); 1531 } 1532 } 1533 1534 template <class ELFT> 1535 bool MipsTargetInfo<ELFT>::isHintRel(uint32_t Type) const { 1536 return Type == R_MIPS_JALR; 1537 } 1538 1539 template <class ELFT> 1540 bool MipsTargetInfo<ELFT>::isRelRelative(uint32_t Type) const { 1541 switch (Type) { 1542 default: 1543 return false; 1544 case R_MIPS_PC16: 1545 case R_MIPS_PC19_S2: 1546 case R_MIPS_PC21_S2: 1547 case R_MIPS_PC26_S2: 1548 case R_MIPS_PCHI16: 1549 case R_MIPS_PCLO16: 1550 return true; 1551 } 1552 } 1553 1554 // _gp is a MIPS-specific ABI-defined symbol which points to 1555 // a location that is relative to GOT. This function returns 1556 // the value for the symbol. 1557 template <class ELFT> typename ELFFile<ELFT>::uintX_t getMipsGpAddr() { 1558 unsigned GPOffset = 0x7ff0; 1559 if (uint64_t V = Out<ELFT>::Got->getVA()) 1560 return V + GPOffset; 1561 return 0; 1562 } 1563 1564 bool needsMipsLocalGot(uint32_t Type, SymbolBody *Body) { 1565 // The R_MIPS_GOT16 relocation requires creation of entry in the local part 1566 // of GOT if its target is a local symbol or non-local symbol with 'local' 1567 // visibility. 1568 if (Type != R_MIPS_GOT16) 1569 return false; 1570 if (!Body) 1571 return true; 1572 uint8_t V = Body->getVisibility(); 1573 if (V != STV_DEFAULT && V != STV_PROTECTED) 1574 return true; 1575 return !Config->Shared; 1576 } 1577 1578 template bool isGnuIFunc<ELF32LE>(const SymbolBody &S); 1579 template bool isGnuIFunc<ELF32BE>(const SymbolBody &S); 1580 template bool isGnuIFunc<ELF64LE>(const SymbolBody &S); 1581 template bool isGnuIFunc<ELF64BE>(const SymbolBody &S); 1582 1583 template uint32_t getMipsGpAddr<ELF32LE>(); 1584 template uint32_t getMipsGpAddr<ELF32BE>(); 1585 template uint64_t getMipsGpAddr<ELF64LE>(); 1586 template uint64_t getMipsGpAddr<ELF64BE>(); 1587 } 1588 } 1589