//===- Target.cpp ---------------------------------------------------------===//
//
// The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Machine-specific things, such as applying relocations, creation of
// GOT or PLT entries, etc., are handled in this file.
//
// Refer to the ELF spec for the single letter variables, S, A or P, used
// in this file.
//
// Some functions defined in this file have "relaxTls" as part of their names.
// They do peephole optimization for TLS variables by rewriting instructions.
// They are not part of the ABI but optional optimization, so you can skip
// them if you are not interested in how TLS variables are optimized.
// See the following paper for the details.
//
// Ulrich Drepper, ELF Handling For Thread-Local Storage
// http://www.akkadia.org/drepper/tls.pdf
//
//===----------------------------------------------------------------------===//

#include "Target.h"
#include "Error.h"
#include "InputFiles.h"
#include "Memory.h"
#include "OutputSections.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Thunks.h"
#include "Writer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Object/ELF.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Endian.h"

using namespace llvm;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::ELF;

// Returns the printable name of a relocation type (e.g. "R_X86_64_PC32")
// for the machine the link is targeting. Used in diagnostics.
std::string lld::toString(uint32_t Type) {
  return getELFRelocationTypeName(elf::Config->EMachine, Type);
}

namespace lld {
namespace elf {

TargetInfo *Target;

// Read-modify-write helpers: OR a value into a 32-bit word in place.
static void or32le(uint8_t *P, int32_t V) { write32le(P, read32le(P) | V); }
static void or32be(uint8_t *P, int32_t V) { write32be(P, read32be(P) | V); }

// Given a pointer into the output buffer, find the input section whose
// output image contains it and return a "file:(section+offset): " style
// location string for error messages, or "" if no section matches.
template <class ELFT> static std::string getErrorLoc(uint8_t *Loc) {
  for (InputSectionData *D : Symtab<ELFT>::X->Sections) {
    auto *IS = dyn_cast_or_null<InputSection<ELFT>>(D);
    if (!IS || !IS->OutSec)
      continue;

    uint8_t *ISLoc = cast<OutputSection<ELFT>>(IS->OutSec)->Loc + IS->OutSecOff;
    if (ISLoc <= Loc && Loc < ISLoc + IS->getSize())
      return IS->getLocation(Loc - ISLoc) + ": ";
  }
  return "";
}

// Non-template front end for getErrorLoc: dispatches on the ELF kind
// selected for this link.
static std::string getErrorLocation(uint8_t *Loc) {
  switch (Config->EKind) {
  case ELF32LEKind:
    return getErrorLoc<ELF32LE>(Loc);
  case ELF32BEKind:
    return getErrorLoc<ELF32BE>(Loc);
  case ELF64LEKind:
    return getErrorLoc<ELF64LE>(Loc);
  case ELF64BEKind:
    return getErrorLoc<ELF64BE>(Loc);
  default:
    llvm_unreachable("unknown ELF type");
  }
}

// Report an error if V does not fit in an N-bit signed field.
template <unsigned N>
static void checkInt(uint8_t *Loc, int64_t V, uint32_t Type) {
  if (!isInt<N>(V))
    error(getErrorLocation(Loc) + "relocation " + toString(Type) +
          " out of range");
}

// Report an error if V does not fit in an N-bit unsigned field.
template <unsigned N>
static void checkUInt(uint8_t *Loc, uint64_t V, uint32_t Type) {
  if (!isUInt<N>(V))
    error(getErrorLocation(Loc) + "relocation " + toString(Type) +
          " out of range");
}

// Report an error if V fits in neither an N-bit signed nor an N-bit
// unsigned field (some relocations accept either interpretation).
template <unsigned N>
static void checkIntUInt(uint8_t *Loc, uint64_t V, uint32_t Type) {
  if (!isInt<N>(V) && !isUInt<N>(V))
    error(getErrorLocation(Loc) + "relocation " + toString(Type) +
          " out of range");
}

// Report an error if V is not aligned to an N-byte boundary
// (N is expected to be a power of two).
template <unsigned N>
static void checkAlignment(uint8_t *Loc, uint64_t V, uint32_t Type) {
  if ((V & (N - 1)) != 0)
    error(getErrorLocation(Loc) + "improper alignment for relocation " +
          toString(Type));
}

namespace {
class X86TargetInfo final : public TargetInfo {
public:
  X86TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
  void writeGotPltHeader(uint8_t *Buf) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;

  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                          RelExpr Expr) const override;
  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

template <class ELFT> class X86_64TargetInfo final : public TargetInfo {
public:
  X86_64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  bool isPicRel(uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPltHeader(uint8_t *Buf) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;

  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                          RelExpr Expr) const override;
  void relaxGot(uint8_t *Loc, uint64_t Val) const override;
  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;

private:
  void relaxGotNoPic(uint8_t *Loc, uint64_t Val, uint8_t Op,
                     uint8_t ModRm) const;
};

class PPCTargetInfo final : public TargetInfo {
public:
  PPCTargetInfo();
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
};

class PPC64TargetInfo final : public TargetInfo {
public:
  PPC64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

class AArch64TargetInfo final : public TargetInfo {
public:
  AArch64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  bool isPicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  bool usesOnlyLowPageBits(uint32_t Type) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                          RelExpr Expr) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

class AMDGPUTargetInfo final : public TargetInfo {
public:
  AMDGPUTargetInfo();
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
};

class ARMTargetInfo final : public TargetInfo {
public:
  ARMTargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  bool isPicRel(uint32_t Type) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  uint64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  RelExpr getThunkExpr(RelExpr Expr, uint32_t RelocType, const InputFile &File,
                       const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

template <class ELFT> class MipsTargetInfo final : public TargetInfo {
public:
  MipsTargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
  bool isPicRel(uint32_t Type) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  RelExpr getThunkExpr(RelExpr Expr, uint32_t RelocType, const InputFile &File,
                       const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  bool usesOnlyLowPageBits(uint32_t Type) const override;
} // anonymous namespace は closing below
;
} // anonymous namespace

// Instantiate the TargetInfo subclass matching Config->EMachine (and, for
// MIPS/x32, Config->EKind). Fatal error for unsupported machines.
TargetInfo *createTarget() {
  switch (Config->EMachine) {
  case EM_386:
  case EM_IAMCU:
    return make<X86TargetInfo>();
  case EM_AARCH64:
    return make<AArch64TargetInfo>();
  case EM_AMDGPU:
    return make<AMDGPUTargetInfo>();
  case EM_ARM:
    return make<ARMTargetInfo>();
  case EM_MIPS:
    switch (Config->EKind) {
    case ELF32LEKind:
      return make<MipsTargetInfo<ELF32LE>>();
    case ELF32BEKind:
      return make<MipsTargetInfo<ELF32BE>>();
    case ELF64LEKind:
      return make<MipsTargetInfo<ELF64LE>>();
    case ELF64BEKind:
      return make<MipsTargetInfo<ELF64BE>>();
    default:
      fatal("unsupported MIPS target");
    }
  case EM_PPC:
    return make<PPCTargetInfo>();
  case EM_PPC64:
    return make<PPC64TargetInfo>();
  case EM_X86_64:
    if (Config->EKind == ELF32LEKind)
      return make<X86_64TargetInfo<ELF32LE>>();
    return make<X86_64TargetInfo<ELF64LE>>();
  }
  fatal("unknown target machine");
}

TargetInfo::~TargetInfo() {}

// Default implementations for targets that do not override these hooks.

uint64_t TargetInfo::getImplicitAddend(const uint8_t *Buf,
                                       uint32_t Type) const {
  return 0;
}

bool TargetInfo::usesOnlyLowPageBits(uint32_t Type) const { return false; }

RelExpr TargetInfo::getThunkExpr(RelExpr Expr, uint32_t RelocType,
                                 const InputFile &File,
                                 const SymbolBody &S) const {
  return Expr;
}

bool TargetInfo::isTlsInitialExecRel(uint32_t Type) const { return false; }

bool TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const { return false; }

bool TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const { return false; }

void TargetInfo::writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const {
  writeGotPlt(Buf, S);
}

RelExpr TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                                    RelExpr Expr) const {
  return Expr;
}

// The relax* hooks must only be reached for targets that advertised the
// corresponding relaxation; hence unreachable in the base class.

void TargetInfo::relaxGot(uint8_t *Loc, uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}

void TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}

void TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}

void TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}

void TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}

X86TargetInfo::X86TargetInfo() {
  CopyRel = R_386_COPY;
  GotRel = R_386_GLOB_DAT;
  PltRel = R_386_JUMP_SLOT;
  IRelativeRel = R_386_IRELATIVE;
  RelativeRel = R_386_RELATIVE;
  TlsGotRel = R_386_TLS_TPOFF;
  TlsModuleIndexRel = R_386_TLS_DTPMOD32;
  TlsOffsetRel = R_386_TLS_DTPOFF32;
  GotEntrySize = 4;
  GotPltEntrySize = 4;
  PltEntrySize = 16;
  PltHeaderSize = 16;
  TlsGdRelaxSkip = 2;
}

RelExpr X86TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
  switch (Type) {
  case R_386_16:
  case R_386_32:
  case R_386_TLS_LDO_32:
    return R_ABS;
  case R_386_TLS_GD:
    return R_TLSGD;
  case R_386_TLS_LDM:
    return R_TLSLD;
  case R_386_PLT32:
    return R_PLT_PC;
  case R_386_PC16:
  case R_386_PC32:
    return R_PC;
  case R_386_GOTPC:
    return R_GOTONLY_PC_FROM_END;
  case R_386_TLS_IE:
    return R_GOT;
  case R_386_GOT32:
  case R_386_GOT32X:
  case R_386_TLS_GOTIE:
    return R_GOT_FROM_END;
  case R_386_GOTOFF:
    return R_GOTREL_FROM_END;
  case R_386_TLS_LE:
    return R_TLS;
  case R_386_TLS_LE_32:
    return R_NEG_TLS;
  case R_386_NONE:
    return R_HINT;
  default:
    error("do not know how to handle relocation '" + toString(Type) + "' (" +
          Twine(Type) + ")");
    return R_HINT;
  }
}

RelExpr X86TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                                       RelExpr Expr) const {
  switch (Expr) {
  default:
    return Expr;
  case R_RELAX_TLS_GD_TO_IE:
    return R_RELAX_TLS_GD_TO_IE_END;
  case R_RELAX_TLS_GD_TO_LE:
    return R_RELAX_TLS_GD_TO_LE_NEG;
  }
}

void X86TargetInfo::writeGotPltHeader(uint8_t *Buf) const {
  write32le(Buf, In<ELF32LE>::Dynamic->getVA());
}

void X86TargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &S) const {
  // Entries in .got.plt initially points back to the corresponding
  // PLT entries with a fixed offset to skip the first instruction.
  write32le(Buf, S.getPltVA<ELF32LE>() + 6);
}

void X86TargetInfo::writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const {
  // An x86 entry is the address of the ifunc resolver function.
  write32le(Buf, S.getVA<ELF32LE>());
}

uint32_t X86TargetInfo::getDynRel(uint32_t Type) const {
  if (Type == R_386_TLS_LE)
    return R_386_TLS_TPOFF;
  if (Type == R_386_TLS_LE_32)
    return R_386_TLS_TPOFF32;
  return Type;
}

bool X86TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
  return Type == R_386_TLS_GD;
}

bool X86TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const {
  return Type == R_386_TLS_LDO_32 || Type == R_386_TLS_LDM;
}

bool X86TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
  return Type == R_386_TLS_IE || Type == R_386_TLS_GOTIE;
}

void X86TargetInfo::writePltHeader(uint8_t *Buf) const {
  // Executable files and shared object files have
  // separate procedure linkage tables.
  if (Config->Pic) {
    const uint8_t V[] = {
        0xff, 0xb3, 0x04, 0x00, 0x00, 0x00, // pushl 4(%ebx)
        0xff, 0xa3, 0x08, 0x00, 0x00, 0x00, // jmp *8(%ebx)
        0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
    };
    memcpy(Buf, V, sizeof(V));
    return;
  }

  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushl (GOT+4)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *(GOT+8)
      0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint32_t Got = In<ELF32LE>::GotPlt->getVA();
  write32le(Buf + 2, Got + 4);
  write32le(Buf + 8, Got + 8);
}

void X86TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                             uint64_t PltEntryAddr, int32_t Index,
                             unsigned RelOff) const {
  const uint8_t Inst[] = {
      0xff, 0x00, 0x00, 0x00, 0x00, 0x00, // jmp *foo_in_GOT|*foo@GOT(%ebx)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushl $reloc_offset
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmp .PLT0@PC
  };
  memcpy(Buf, Inst, sizeof(Inst));

  // jmp *foo@GOT(%ebx) or jmp *foo_in_GOT
  Buf[1] = Config->Pic ? 0xa3 : 0x25;
  uint32_t Got = In<ELF32LE>::GotPlt->getVA();
  write32le(Buf + 2, Config->Shared ? GotEntryAddr - Got : GotEntryAddr);
  write32le(Buf + 7, RelOff);
  write32le(Buf + 12, -Index * PltEntrySize - PltHeaderSize - 16);
}

uint64_t X86TargetInfo::getImplicitAddend(const uint8_t *Buf,
                                          uint32_t Type) const {
  switch (Type) {
  default:
    return 0;
  case R_386_16:
  case R_386_PC16:
    return read16le(Buf);
  case R_386_32:
  case R_386_GOT32:
  case R_386_GOT32X:
  case R_386_GOTOFF:
  case R_386_GOTPC:
  case R_386_PC32:
  case R_386_PLT32:
  case R_386_TLS_LE:
    return read32le(Buf);
  }
}

void X86TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  checkInt<32>(Loc, Val, Type);

  // R_386_PC16 and R_386_16 are not part of the current i386 psABI. They are
  // used by 16-bit x86 objects, like boot loaders.
  if (Type == R_386_16 || Type == R_386_PC16) {
    write16le(Loc, Val);
    return;
  }
  write32le(Loc, Val);
}

void X86TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Convert
  //   leal x@tlsgd(, %ebx, 1),
  //   call __tls_get_addr@plt
  // to
  //   movl %gs:0,%eax
  //   subl $x@ntpoff,%eax
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x81, 0xe8, 0x00, 0x00, 0x00, 0x00  // subl 0(%ebx), %eax
  };
  memcpy(Loc - 3, Inst, sizeof(Inst));
  relocateOne(Loc + 5, R_386_32, Val);
}

void X86TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Convert
  //   leal x@tlsgd(, %ebx, 1),
  //   call __tls_get_addr@plt
  // to
  //   movl %gs:0, %eax
  //   addl x@gotntpoff(%ebx), %eax
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x03, 0x83, 0x00, 0x00, 0x00, 0x00  // addl 0(%ebx), %eax
  };
  memcpy(Loc - 3, Inst, sizeof(Inst));
  relocateOne(Loc + 5, R_386_32, Val);
}

// In some conditions, relocations can be optimized to avoid using GOT.
// This function does that for Initial Exec to Local Exec case.
void X86TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Ulrich's document section 6.2 says that @gotntpoff can
  // be used with MOVL or ADDL instructions.
  // @indntpoff is similar to @gotntpoff, but for use in
  // position dependent code.
  uint8_t Reg = (Loc[-1] >> 3) & 7;

  if (Type == R_386_TLS_IE) {
    if (Loc[-1] == 0xa1) {
      // "movl foo@indntpoff,%eax" -> "movl $foo,%eax"
      // This case is different from the generic case below because
      // this is a 5 byte instruction while below is 6 bytes.
      Loc[-1] = 0xb8;
    } else if (Loc[-2] == 0x8b) {
      // "movl foo@indntpoff,%reg" -> "movl $foo,%reg"
      Loc[-2] = 0xc7;
      Loc[-1] = 0xc0 | Reg;
    } else {
      // "addl foo@indntpoff,%reg" -> "addl $foo,%reg"
      Loc[-2] = 0x81;
      Loc[-1] = 0xc0 | Reg;
    }
  } else {
    assert(Type == R_386_TLS_GOTIE);
    if (Loc[-2] == 0x8b) {
      // "movl foo@gottpoff(%rip),%reg" -> "movl $foo,%reg"
      Loc[-2] = 0xc7;
      Loc[-1] = 0xc0 | Reg;
    } else {
      // "addl foo@gotntpoff(%rip),%reg" -> "leal foo(%reg),%reg"
      Loc[-2] = 0x8d;
      Loc[-1] = 0x80 | (Reg << 3) | Reg;
    }
  }
  relocateOne(Loc, R_386_TLS_LE, Val);
}

void X86TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  if (Type == R_386_TLS_LDO_32) {
    relocateOne(Loc, R_386_TLS_LE, Val);
    return;
  }

  // Convert
  //   leal foo(%reg),%eax
  //   call ___tls_get_addr
  // to
  //   movl %gs:0,%eax
  //   nop
  //   leal 0(%esi,1),%esi
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0,%eax
      0x90,                               // nop
      0x8d, 0x74, 0x26, 0x00              // leal 0(%esi,1),%esi
  };
  memcpy(Loc - 2, Inst, sizeof(Inst));
}

template <class ELFT> X86_64TargetInfo<ELFT>::X86_64TargetInfo() {
  CopyRel = R_X86_64_COPY;
  GotRel = R_X86_64_GLOB_DAT;
  PltRel = R_X86_64_JUMP_SLOT;
  RelativeRel = R_X86_64_RELATIVE;
  IRelativeRel = R_X86_64_IRELATIVE;
  TlsGotRel = R_X86_64_TPOFF64;
  TlsModuleIndexRel = R_X86_64_DTPMOD64;
  TlsOffsetRel = R_X86_64_DTPOFF64;
  GotEntrySize = 8;
  GotPltEntrySize = 8;
  PltEntrySize = 16;
  PltHeaderSize = 16;
  TlsGdRelaxSkip = 2;
  // Align to the large page size (known as a superpage or huge page).
  // FreeBSD automatically promotes large, superpage-aligned allocations.
  DefaultImageBase = 0x200000;
}

template <class ELFT>
RelExpr X86_64TargetInfo<ELFT>::getRelExpr(uint32_t Type,
                                           const SymbolBody &S) const {
  switch (Type) {
  case R_X86_64_32:
  case R_X86_64_32S:
  case R_X86_64_64:
  case R_X86_64_DTPOFF32:
  case R_X86_64_DTPOFF64:
    return R_ABS;
  case R_X86_64_TPOFF32:
    return R_TLS;
  case R_X86_64_TLSLD:
    return R_TLSLD_PC;
  case R_X86_64_TLSGD:
    return R_TLSGD_PC;
  case R_X86_64_SIZE32:
  case R_X86_64_SIZE64:
    return R_SIZE;
  case R_X86_64_PLT32:
    return R_PLT_PC;
  case R_X86_64_PC32:
  case R_X86_64_PC64:
    return R_PC;
  case R_X86_64_GOT32:
  case R_X86_64_GOT64:
    return R_GOT_FROM_END;
  case R_X86_64_GOTPCREL:
  case R_X86_64_GOTPCRELX:
  case R_X86_64_REX_GOTPCRELX:
  case R_X86_64_GOTTPOFF:
    return R_GOT_PC;
  case R_X86_64_NONE:
    return R_HINT;
  default:
    error("do not know how to handle relocation '" + toString(Type) + "' (" +
          Twine(Type) + ")");
    return R_HINT;
  }
}

template <class ELFT>
void X86_64TargetInfo<ELFT>::writeGotPltHeader(uint8_t *Buf) const {
  // The first entry holds the value of _DYNAMIC. It is not clear why that is
  // required, but it is documented in the psabi and the glibc dynamic linker
  // seems to use it (note that this is relevant for linking ld.so, not any
  // other program).
  write64le(Buf, In<ELFT>::Dynamic->getVA());
}

template <class ELFT>
void X86_64TargetInfo<ELFT>::writeGotPlt(uint8_t *Buf,
                                         const SymbolBody &S) const {
  // See comments in X86TargetInfo::writeGotPlt.
  write32le(Buf, S.getPltVA<ELFT>() + 6);
}

template <class ELFT>
void X86_64TargetInfo<ELFT>::writePltHeader(uint8_t *Buf) const {
  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushq GOT+8(%rip)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *GOT+16(%rip)
      0x0f, 0x1f, 0x40, 0x00              // nopl 0x0(rax)
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint64_t Got = In<ELFT>::GotPlt->getVA();
  uint64_t Plt = In<ELFT>::Plt->getVA();
  write32le(Buf + 2, Got - Plt + 2); // GOT+8
  write32le(Buf + 8, Got - Plt + 4); // GOT+16
}

template <class ELFT>
void X86_64TargetInfo<ELFT>::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                      uint64_t PltEntryAddr, int32_t Index,
                                      unsigned RelOff) const {
  const uint8_t Inst[] = {
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmpq *got(%rip)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushq <relocation index>
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmpq plt[0]
  };
  memcpy(Buf, Inst, sizeof(Inst));

  write32le(Buf + 2, GotEntryAddr - PltEntryAddr - 6);
  write32le(Buf + 7, Index);
  write32le(Buf + 12, -Index * PltEntrySize - PltHeaderSize - 16);
}

template <class ELFT>
bool X86_64TargetInfo<ELFT>::isPicRel(uint32_t Type) const {
  return Type != R_X86_64_PC32 && Type != R_X86_64_32;
}

template <class ELFT>
bool X86_64TargetInfo<ELFT>::isTlsInitialExecRel(uint32_t Type) const {
  return Type == R_X86_64_GOTTPOFF;
}

template <class ELFT>
bool X86_64TargetInfo<ELFT>::isTlsGlobalDynamicRel(uint32_t Type) const {
  return Type == R_X86_64_TLSGD;
}

template <class ELFT>
bool X86_64TargetInfo<ELFT>::isTlsLocalDynamicRel(uint32_t Type) const {
  return Type == R_X86_64_DTPOFF32 || Type == R_X86_64_DTPOFF64 ||
         Type == R_X86_64_TLSLD;
}

template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                            uint64_t Val) const {
  // Convert
  //   .byte 0x66
  //   leaq x@tlsgd(%rip), %rdi
  //   .word 0x6666
  //   rex64
  //   call __tls_get_addr@plt
  // to
  //   mov %fs:0x0,%rax
  //   lea x@tpoff,%rax
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
      0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00              // lea x@tpoff,%rax
  };
  memcpy(Loc - 4, Inst, sizeof(Inst));
  // The original code used a pc relative relocation and so we have to
  // compensate for the -4 it had in the addend.
  relocateOne(Loc + 8, R_X86_64_TPOFF32, Val + 4);
}

template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
                                            uint64_t Val) const {
  // Convert
  //   .byte 0x66
  //   leaq x@tlsgd(%rip), %rdi
  //   .word 0x6666
  //   rex64
  //   call __tls_get_addr@plt
  // to
  //   mov %fs:0x0,%rax
  //   addq x@tpoff,%rax
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
      0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00              // addq x@tpoff,%rax
  };
  memcpy(Loc - 4, Inst, sizeof(Inst));
  // Both code sequences are PC relatives, but since we are moving the constant
  // forward by 8 bytes we have to subtract the value by 8.
  relocateOne(Loc + 8, R_X86_64_PC32, Val - 8);
}

// In some conditions, R_X86_64_GOTTPOFF relocation can be optimized to
// R_X86_64_TPOFF32 so that it does not use GOT.
template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                            uint64_t Val) const {
  uint8_t *Inst = Loc - 3;
  uint8_t Reg = Loc[-1] >> 3;
  uint8_t *RegSlot = Loc - 1;

  // Note that ADD with RSP or R12 is converted to ADD instead of LEA
  // because LEA with these registers needs 4 bytes to encode and thus
  // wouldn't fit the space.

  if (memcmp(Inst, "\x48\x03\x25", 3) == 0) {
    // "addq foo@gottpoff(%rip),%rsp" -> "addq $foo,%rsp"
    memcpy(Inst, "\x48\x81\xc4", 3);
  } else if (memcmp(Inst, "\x4c\x03\x25", 3) == 0) {
    // "addq foo@gottpoff(%rip),%r12" -> "addq $foo,%r12"
    memcpy(Inst, "\x49\x81\xc4", 3);
  } else if (memcmp(Inst, "\x4c\x03", 2) == 0) {
    // "addq foo@gottpoff(%rip),%r[8-15]" -> "leaq foo(%r[8-15]),%r[8-15]"
    memcpy(Inst, "\x4d\x8d", 2);
    *RegSlot = 0x80 | (Reg << 3) | Reg;
  } else if (memcmp(Inst, "\x48\x03", 2) == 0) {
    // "addq foo@gottpoff(%rip),%reg -> "leaq foo(%reg),%reg"
    memcpy(Inst, "\x48\x8d", 2);
    *RegSlot = 0x80 | (Reg << 3) | Reg;
  } else if (memcmp(Inst, "\x4c\x8b", 2) == 0) {
    // "movq foo@gottpoff(%rip),%r[8-15]" -> "movq $foo,%r[8-15]"
    memcpy(Inst, "\x49\xc7", 2);
    *RegSlot = 0xc0 | Reg;
  } else if (memcmp(Inst, "\x48\x8b", 2) == 0) {
    // "movq foo@gottpoff(%rip),%reg" -> "movq $foo,%reg"
    memcpy(Inst, "\x48\xc7", 2);
    *RegSlot = 0xc0 | Reg;
  } else {
    error(getErrorLocation(Loc - 3) +
          "R_X86_64_GOTTPOFF must be used in MOVQ or ADDQ instructions only");
  }

  // The original code used a PC relative relocation.
  // Need to compensate for the -4 it had in the addend.
  relocateOne(Loc, R_X86_64_TPOFF32, Val + 4);
}

template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                            uint64_t Val) const {
  // Convert
  //   leaq bar@tlsld(%rip), %rdi
  //   callq __tls_get_addr@PLT
  //   leaq bar@dtpoff(%rax), %rcx
  // to
  //   .word 0x6666
  //   .byte 0x66
  //   mov %fs:0,%rax
  //   leaq bar@tpoff(%rax), %rcx
  if (Type == R_X86_64_DTPOFF64) {
    write64le(Loc, Val);
    return;
  }
  if (Type == R_X86_64_DTPOFF32) {
    relocateOne(Loc, R_X86_64_TPOFF32, Val);
    return;
  }

  const uint8_t Inst[] = {
      0x66, 0x66,                                          // .word 0x6666
      0x66,                                                // .byte 0x66
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
  };
  memcpy(Loc - 3, Inst, sizeof(Inst));
}

template <class ELFT>
void X86_64TargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint32_t Type,
                                         uint64_t Val) const {
  switch (Type) {
  case R_X86_64_32:
    checkUInt<32>(Loc, Val, Type);
    write32le(Loc, Val);
    break;
  case R_X86_64_32S:
  case R_X86_64_TPOFF32:
  case R_X86_64_GOT32:
  case R_X86_64_GOTPCREL:
  case R_X86_64_GOTPCRELX:
  case R_X86_64_REX_GOTPCRELX:
  case R_X86_64_PC32:
  case R_X86_64_GOTTPOFF:
  case R_X86_64_PLT32:
  case R_X86_64_TLSGD:
  case R_X86_64_TLSLD:
  case R_X86_64_DTPOFF32:
  case R_X86_64_SIZE32:
    checkInt<32>(Loc, Val, Type);
    write32le(Loc, Val);
    break;
  case R_X86_64_64:
  case R_X86_64_DTPOFF64:
  case R_X86_64_GLOB_DAT:
  case R_X86_64_PC64:
  case R_X86_64_SIZE64:
  case R_X86_64_GOT64:
    write64le(Loc, Val);
    break;
  default:
    llvm_unreachable("unexpected relocation");
  }
}

template <class ELFT>
RelExpr X86_64TargetInfo<ELFT>::adjustRelaxExpr(uint32_t Type,
                                                const uint8_t *Data,
                                                RelExpr RelExpr) const {
  if (Type != R_X86_64_GOTPCRELX && Type != R_X86_64_REX_GOTPCRELX)
    return RelExpr;
  const uint8_t Op = Data[-2];
  const uint8_t ModRm = Data[-1];
  // FIXME: When PIC is disabled and foo is defined locally in the
  // lower 32 bit address space, memory operand in mov can be converted into
  // immediate operand. Otherwise, mov must be changed to lea. We support only
  // latter relaxation at this moment.
  if (Op == 0x8b)
    return R_RELAX_GOT_PC;
  // Relax call and jmp.
  if (Op == 0xff && (ModRm == 0x15 || ModRm == 0x25))
    return R_RELAX_GOT_PC;

  // Relaxation of test, adc, add, and, cmp, or, sbb, sub, xor.
  // If PIC then no relaxation is available.
  // We also don't relax test/binop instructions without REX byte,
  // they are 32bit operations and not common to have.
  assert(Type == R_X86_64_REX_GOTPCRELX);
  return Config->Pic ? RelExpr : R_RELAX_GOT_PC_NOPIC;
}

// A subset of relaxations can only be applied for no-PIC. This method
// handles such relaxations. Instructions encoding information was taken from:
// "Intel 64 and IA-32 Architectures Software Developer's Manual V2"
// (http://www.intel.com/content/dam/www/public/us/en/documents/manuals/
// 64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf)
template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxGotNoPic(uint8_t *Loc, uint64_t Val,
                                           uint8_t Op, uint8_t ModRm) const {
  const uint8_t Rex = Loc[-3];
  // Convert "test %reg, foo@GOTPCREL(%rip)" to "test $foo, %reg".
  if (Op == 0x85) {
    // See "TEST-Logical Compare" (4-428 Vol. 2B),
    // TEST r/m64, r64 uses "full" ModR / M byte (no opcode extension).

    // ModR/M byte has form XX YYY ZZZ, where
    // YYY is MODRM.reg(register 2), ZZZ is MODRM.rm(register 1).
    // XX has different meanings:
    // 00: The operand's memory address is in reg1.
    // 01: The operand's memory address is reg1 + a byte-sized displacement.
    // 10: The operand's memory address is reg1 + a word-sized displacement.
    // 11: The operand is reg1 itself.
    // If an instruction requires only one operand, the unused reg2 field
    // holds extra opcode bits rather than a register code
    // 0xC0 == 11 000 000 binary.
    // 0x38 == 00 111 000 binary.
    // We transfer reg2 to reg1 here as operand.
    // See "2.1.3 ModR/M and SIB Bytes" (Vol. 2A 2-3).
    Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3; // ModR/M byte.

    // Change opcode from TEST r/m64, r64 to TEST r/m64, imm32
    // See "TEST-Logical Compare" (4-428 Vol. 2B).
    Loc[-2] = 0xf7;

    // Move R bit to the B bit in REX byte.
    // REX byte is encoded as 0100WRXB, where
    // 0100 is 4bit fixed pattern.
    // REX.W When 1, a 64-bit operand size is used. Otherwise, when 0, the
    // default operand size is used (which is 32-bit for most but not all
    // instructions).
    // REX.R This 1-bit value is an extension to the MODRM.reg field.
    // REX.X This 1-bit value is an extension to the SIB.index field.
    // REX.B This 1-bit value is an extension to the MODRM.rm field or the
    // SIB.base field.
    // See "2.2.1.2 More on REX Prefix Fields " (2-8 Vol. 2A).
    Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
    relocateOne(Loc, R_X86_64_PC32, Val);
    return;
  }

  // If we are here then we need to relax the adc, add, and, cmp, or, sbb, sub
  // or xor operations.

  // Convert "binop foo@GOTPCREL(%rip), %reg" to "binop $foo, %reg".
  // Logic is close to one for test instruction above, but we also
  // write opcode extension here, see below for details.
  Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3 | (Op & 0x3c); // ModR/M byte.

  // Primary opcode is 0x81, opcode extension is one of:
  // 000b = ADD, 001b is OR, 010b is ADC, 011b is SBB,
  // 100b is AND, 101b is SUB, 110b is XOR, 111b is CMP.
  // This value was written to MODRM.reg in a line above.
  // See "3.2 INSTRUCTIONS (A-M)" (Vol. 2A 3-15),
  // "INSTRUCTION SET REFERENCE, N-Z" (Vol. 2B 4-1) for
  // descriptions about each operation.
  Loc[-2] = 0x81;
  Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
  relocateOne(Loc, R_X86_64_PC32, Val);
}

template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxGot(uint8_t *Loc, uint64_t Val) const {
  const uint8_t Op = Loc[-2];
  const uint8_t ModRm = Loc[-1];

  // Convert "mov foo@GOTPCREL(%rip),%reg" to "lea foo(%rip),%reg".
  if (Op == 0x8b) {
    Loc[-2] = 0x8d;
    relocateOne(Loc, R_X86_64_PC32, Val);
    return;
  }

  if (Op != 0xff) {
    // We are relaxing a rip relative to an absolute, so compensate
    // for the old -4 addend.
    assert(!Config->Pic);
    relaxGotNoPic(Loc, Val + 4, Op, ModRm);
    return;
  }

  // Convert call/jmp instructions.
  if (ModRm == 0x15) {
    // ABI says we can convert "call *foo@GOTPCREL(%rip)" to "nop; call foo".
    // Instead we convert to "addr32 call foo" where addr32 is an instruction
    // prefix. That makes result expression to be a single instruction.
    Loc[-2] = 0x67; // addr32 prefix
    Loc[-1] = 0xe8; // call
    relocateOne(Loc, R_X86_64_PC32, Val);
    return;
  }

  // Convert "jmp *foo@GOTPCREL(%rip)" to "jmp foo; nop".
  // jmp doesn't return, so it is fine to use nop here, it is just a stub.
  assert(ModRm == 0x25);
  Loc[-2] = 0xe9; // jmp
  Loc[3] = 0x90;  // nop
  relocateOne(Loc - 1, R_X86_64_PC32, Val + 1);
}

// Relocation masks following the #lo(value), #hi(value), #ha(value),
// #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
// document.
// Extract the 16-bit halfword selected by each PPC @lo/@hi/@ha/... operator.
// The "a" (adjusted) forms add 0x8000 so that a following low 16-bit add
// with a sign-extended #lo(value) produces the full value.
static uint16_t applyPPCLo(uint64_t V) { return V; }
static uint16_t applyPPCHi(uint64_t V) { return V >> 16; }
static uint16_t applyPPCHa(uint64_t V) { return (V + 0x8000) >> 16; }
static uint16_t applyPPCHigher(uint64_t V) { return V >> 32; }
static uint16_t applyPPCHighera(uint64_t V) { return (V + 0x8000) >> 32; }
static uint16_t applyPPCHighest(uint64_t V) { return V >> 48; }
static uint16_t applyPPCHighesta(uint64_t V) { return (V + 0x8000) >> 48; }

PPCTargetInfo::PPCTargetInfo() {}

// Apply a 32-bit PPC relocation. All fields are big-endian.
void PPCTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  switch (Type) {
  case R_PPC_ADDR16_HA:
    write16be(Loc, applyPPCHa(Val));
    break;
  case R_PPC_ADDR16_LO:
    write16be(Loc, applyPPCLo(Val));
    break;
  case R_PPC_ADDR32:
  case R_PPC_REL32:
    write32be(Loc, Val);
    break;
  case R_PPC_REL24:
    // 24-bit branch displacement; low 2 bits are always zero.
    or32be(Loc, Val & 0x3FFFFFC);
    break;
  default:
    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
  }
}

RelExpr PPCTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
  switch (Type) {
  case R_PPC_REL24:
  case R_PPC_REL32:
    return R_PC;
  default:
    return R_ABS;
  }
}

PPC64TargetInfo::PPC64TargetInfo() {
  PltRel = GotRel = R_PPC64_GLOB_DAT;
  RelativeRel = R_PPC64_RELATIVE;
  GotEntrySize = 8;
  GotPltEntrySize = 8;
  PltEntrySize = 32;
  PltHeaderSize = 0;

  // We need 64K pages (at least under glibc/Linux, the loader won't
  // set different permissions on a finer granularity than that).
  DefaultMaxPageSize = 65536;

  // The PPC64 ELF ABI v1 spec, says:
  //
  //   It is normally desirable to put segments with different characteristics
  //   in separate 256 Mbyte portions of the address space, to give the
  //   operating system full paging flexibility in the 64-bit address space.
  //
  // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
  // use 0x10000000 as the starting address.
  DefaultImageBase = 0x10000000;
}

// Bias applied to the TOC pointer so signed 16-bit offsets can address the
// full first 64K of the TOC.
static uint64_t PPC64TocOffset = 0x8000;

uint64_t getPPC64TocBase() {
  // The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The
  // TOC starts where the first of these sections starts. We always create a
  // .got when we see a relocation that uses it, so for us the start is always
  // the .got.
  uint64_t TocVA = In<ELF64BE>::Got->getVA();

  // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
  // thus permitting a full 64 Kbytes segment. Note that the glibc startup
  // code (crt1.o) assumes that you can get from the TOC base to the
  // start of the .toc section with only a single (signed) 16-bit relocation.
  return TocVA + PPC64TocOffset;
}

RelExpr PPC64TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
  switch (Type) {
  default:
    return R_ABS;
  case R_PPC64_TOC16:
  case R_PPC64_TOC16_DS:
  case R_PPC64_TOC16_HA:
  case R_PPC64_TOC16_HI:
  case R_PPC64_TOC16_LO:
  case R_PPC64_TOC16_LO_DS:
    return R_GOTREL;
  case R_PPC64_TOC:
    return R_PPC_TOC;
  case R_PPC64_REL24:
    return R_PPC_PLT_OPD;
  }
}

// Write one PLT entry: load the function descriptor address from the GOT
// (TOC-relative), then load and branch through its entry-point word.
void PPC64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                               uint64_t PltEntryAddr, int32_t Index,
                               unsigned RelOff) const {
  uint64_t Off = GotEntryAddr - getPPC64TocBase();

  // FIXME: What we should do, in theory, is get the offset of the function
  // descriptor in the .opd section, and use that as the offset from %r2 (the
  // TOC-base pointer). Instead, we have the GOT-entry offset, and that will
  // be a pointer to the function descriptor in the .opd section. Using
  // this scheme is simpler, but requires an extra indirection per PLT dispatch.

  write32be(Buf, 0xf8410028);                       // std %r2, 40(%r1)
  write32be(Buf + 4, 0x3d620000 | applyPPCHa(Off)); // addis %r11, %r2, X@ha
  write32be(Buf + 8, 0xe98b0000 | applyPPCLo(Off)); // ld %r12, X@l(%r11)
  write32be(Buf + 12, 0xe96c0000);                  // ld %r11,0(%r12)
  write32be(Buf + 16, 0x7d6903a6);                  // mtctr %r11
  write32be(Buf + 20, 0xe84c0008);                  // ld %r2,8(%r12)
  write32be(Buf + 24, 0xe96c0010);                  // ld %r11,16(%r12)
  write32be(Buf + 28, 0x4e800420);                  // bctr
}

// Map a TOC-relative relocation type to the equivalent ADDR16 type, with the
// value rebased from the (biased) TOC base. Non-TOC types pass through.
static std::pair<uint32_t, uint64_t> toAddr16Rel(uint32_t Type, uint64_t Val) {
  uint64_t V = Val - PPC64TocOffset;
  switch (Type) {
  case R_PPC64_TOC16:
    return {R_PPC64_ADDR16, V};
  case R_PPC64_TOC16_DS:
    return {R_PPC64_ADDR16_DS, V};
  case R_PPC64_TOC16_HA:
    return {R_PPC64_ADDR16_HA, V};
  case R_PPC64_TOC16_HI:
    return {R_PPC64_ADDR16_HI, V};
  case R_PPC64_TOC16_LO:
    return {R_PPC64_ADDR16_LO, V};
  case R_PPC64_TOC16_LO_DS:
    return {R_PPC64_ADDR16_LO_DS, V};
  default:
    return {Type, Val};
  }
}

void PPC64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                  uint64_t Val) const {
  // For a TOC-relative relocation, proceed in terms of the corresponding
  // ADDR16 relocation type.
  std::tie(Type, Val) = toAddr16Rel(Type, Val);

  switch (Type) {
  case R_PPC64_ADDR14: {
    checkAlignment<4>(Loc, Val, Type);
    // Preserve the AA/LK bits in the branch instruction
    uint8_t AALK = Loc[3];
    write16be(Loc + 2, (AALK & 3) | (Val & 0xfffc));
    break;
  }
  case R_PPC64_ADDR16:
    checkInt<16>(Loc, Val, Type);
    write16be(Loc, Val);
    break;
  case R_PPC64_ADDR16_DS:
    // DS-form: the low 2 bits of the instruction word are part of the opcode
    // and must be preserved.
    checkInt<16>(Loc, Val, Type);
    write16be(Loc, (read16be(Loc) & 3) | (Val & ~3));
    break;
  case R_PPC64_ADDR16_HA:
  case R_PPC64_REL16_HA:
    write16be(Loc, applyPPCHa(Val));
    break;
  case R_PPC64_ADDR16_HI:
  case R_PPC64_REL16_HI:
    write16be(Loc, applyPPCHi(Val));
    break;
  case R_PPC64_ADDR16_HIGHER:
    write16be(Loc, applyPPCHigher(Val));
    break;
  case R_PPC64_ADDR16_HIGHERA:
    write16be(Loc, applyPPCHighera(Val));
    break;
  case R_PPC64_ADDR16_HIGHEST:
    write16be(Loc, applyPPCHighest(Val));
    break;
  case R_PPC64_ADDR16_HIGHESTA:
    write16be(Loc, applyPPCHighesta(Val));
    break;
  case R_PPC64_ADDR16_LO:
    write16be(Loc, applyPPCLo(Val));
    break;
  case R_PPC64_ADDR16_LO_DS:
  case R_PPC64_REL16_LO:
    write16be(Loc, (read16be(Loc) & 3) | (applyPPCLo(Val) & ~3));
    break;
  case R_PPC64_ADDR32:
  case R_PPC64_REL32:
    checkInt<32>(Loc, Val, Type);
    write32be(Loc, Val);
    break;
  case R_PPC64_ADDR64:
  case R_PPC64_REL64:
  case R_PPC64_TOC:
    write64be(Loc, Val);
    break;
  case R_PPC64_REL24: {
    // 24-bit branch displacement field; bits outside the mask (opcode, AA/LK)
    // are preserved.
    uint32_t Mask = 0x03FFFFFC;
    checkInt<24>(Loc, Val, Type);
    write32be(Loc, (read32be(Loc) & ~Mask) | (Val & Mask));
    break;
  }
  default:
    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
  }
}

AArch64TargetInfo::AArch64TargetInfo() {
  CopyRel = R_AARCH64_COPY;
  RelativeRel = R_AARCH64_RELATIVE;
  IRelativeRel = R_AARCH64_IRELATIVE;
  GotRel = R_AARCH64_GLOB_DAT;
  PltRel = R_AARCH64_JUMP_SLOT;
  TlsDescRel = R_AARCH64_TLSDESC;
  TlsGotRel = R_AARCH64_TLS_TPREL64;
  GotEntrySize = 8;
  GotPltEntrySize = 8;
  PltEntrySize = 16;
  PltHeaderSize = 32;
  DefaultMaxPageSize = 65536;

  // It doesn't seem to be documented anywhere, but tls on aarch64 uses variant
  // 1 of the tls structures and the tcb size is 16.
  TcbSize = 16;
}

RelExpr AArch64TargetInfo::getRelExpr(uint32_t Type,
                                      const SymbolBody &S) const {
  switch (Type) {
  default:
    return R_ABS;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    return R_TLSDESC_PAGE;
  case R_AARCH64_TLSDESC_LD64_LO12_NC:
  case R_AARCH64_TLSDESC_ADD_LO12_NC:
    return R_TLSDESC;
  case R_AARCH64_TLSDESC_CALL:
    return R_TLSDESC_CALL;
  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    return R_TLS;
  case R_AARCH64_CALL26:
  case R_AARCH64_CONDBR19:
  case R_AARCH64_JUMP26:
  case R_AARCH64_TSTBR14:
    return R_PLT_PC;
  case R_AARCH64_PREL16:
  case R_AARCH64_PREL32:
  case R_AARCH64_PREL64:
  case R_AARCH64_ADR_PREL_LO21:
    return R_PC;
  case R_AARCH64_ADR_PREL_PG_HI21:
    return R_PAGE_PC;
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    return R_GOT;
  case R_AARCH64_ADR_GOT_PAGE:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    return R_GOT_PAGE_PC;
  }
}

RelExpr AArch64TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                                           RelExpr Expr) const {
  if (Expr == R_RELAX_TLS_GD_TO_IE) {
    // The page-computing adrp needs a page-relative expression; the other
    // instructions in the TLSDESC sequence take the absolute GOT address.
    if (Type == R_AARCH64_TLSDESC_ADR_PAGE21)
      return R_RELAX_TLS_GD_TO_IE_PAGE_PC;
    return R_RELAX_TLS_GD_TO_IE_ABS;
  }
  return Expr;
}

bool AArch64TargetInfo::usesOnlyLowPageBits(uint32_t Type) const {
  switch (Type) {
  default:
    return false;
  case R_AARCH64_ADD_ABS_LO12_NC:
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_LDST128_ABS_LO12_NC:
  case R_AARCH64_LDST16_ABS_LO12_NC:
  case R_AARCH64_LDST32_ABS_LO12_NC:
  case R_AARCH64_LDST64_ABS_LO12_NC:
  case R_AARCH64_LDST8_ABS_LO12_NC:
  case R_AARCH64_TLSDESC_ADD_LO12_NC:
  case R_AARCH64_TLSDESC_LD64_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    return true;
  }
}

bool AArch64TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
  return Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
         Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
}

bool AArch64TargetInfo::isPicRel(uint32_t Type) const {
  return Type == R_AARCH64_ABS32 || Type == R_AARCH64_ABS64;
}

void AArch64TargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
  write64le(Buf, In<ELF64LE>::Plt->getVA());
}

// Page(Expr) is the page address of the expression Expr, defined
// as (Expr & ~0xFFF). (This applies even if the machine page size
// supported by the platform has a different value.)
uint64_t getAArch64Page(uint64_t Expr) {
  return Expr & (~static_cast<uint64_t>(0xFFF));
}

void AArch64TargetInfo::writePltHeader(uint8_t *Buf) const {
  const uint8_t PltData[] = {
      0xf0, 0x7b, 0xbf, 0xa9, // stp x16, x30, [sp,#-16]!
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[2]))
      0x11, 0x02, 0x40, 0xf9, // ldr x17, [x16, Offset(&(.plt.got[2]))]
      0x10, 0x02, 0x00, 0x91, // add x16, x16, Offset(&(.plt.got[2]))
      0x20, 0x02, 0x1f, 0xd6, // br x17
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5  // nop
  };
  memcpy(Buf, PltData, sizeof(PltData));

  uint64_t Got = In<ELF64LE>::GotPlt->getVA();
  uint64_t Plt = In<ELF64LE>::Plt->getVA();
  // Patch the adrp/ldr/add with the address of .got.plt[2], where the loader
  // stores the lazy-resolution trampoline target.
  relocateOne(Buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
              getAArch64Page(Got + 16) - getAArch64Page(Plt + 4));
  relocateOne(Buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, Got + 16);
  relocateOne(Buf + 12, R_AARCH64_ADD_ABS_LO12_NC, Got + 16);
}

void AArch64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                 uint64_t PltEntryAddr, int32_t Index,
                                 unsigned RelOff) const {
  const uint8_t Inst[] = {
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[n]))
      0x11, 0x02, 0x40, 0xf9, // ldr x17, [x16, Offset(&(.plt.got[n]))]
      0x10, 0x02, 0x00, 0x91, // add x16, x16, Offset(&(.plt.got[n]))
      0x20, 0x02, 0x1f, 0xd6  // br x17
  };
  memcpy(Buf, Inst, sizeof(Inst));

  relocateOne(Buf, R_AARCH64_ADR_PREL_PG_HI21,
              getAArch64Page(GotEntryAddr) - getAArch64Page(PltEntryAddr));
  relocateOne(Buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, GotEntryAddr);
  relocateOne(Buf + 8, R_AARCH64_ADD_ABS_LO12_NC, GotEntryAddr);
}

// Patch the 21-bit immediate of an ADR/ADRP instruction, which is split into
// a 2-bit immlo field (bits 29-30) and a 19-bit immhi field (bits 5-23).
static void write32AArch64Addr(uint8_t *L, uint64_t Imm) {
  uint32_t ImmLo = (Imm & 0x3) << 29;
  uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
  uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
  write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
}

// Return the bits [Start, End] from Val shifted Start bits.
// For instance, getBits(0xF0, 4, 8) returns 0xF.
static uint64_t getBits(uint64_t Val, int Start, int End) {
  uint64_t Mask = ((uint64_t)1 << (End + 1 - Start)) - 1;
  return (Val >> Start) & Mask;
}

// Update the immediate field in a AARCH64 ldr, str, and add instruction.
static void or32AArch64Imm(uint8_t *L, uint64_t Imm) {
  or32le(L, (Imm & 0xFFF) << 10);
}

void AArch64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                    uint64_t Val) const {
  switch (Type) {
  case R_AARCH64_ABS16:
  case R_AARCH64_PREL16:
    checkIntUInt<16>(Loc, Val, Type);
    write16le(Loc, Val);
    break;
  case R_AARCH64_ABS32:
  case R_AARCH64_PREL32:
    checkIntUInt<32>(Loc, Val, Type);
    write32le(Loc, Val);
    break;
  case R_AARCH64_ABS64:
  case R_AARCH64_GLOB_DAT:
  case R_AARCH64_PREL64:
    write64le(Loc, Val);
    break;
  case R_AARCH64_ADD_ABS_LO12_NC:
    or32AArch64Imm(Loc, Val);
    break;
  case R_AARCH64_ADR_GOT_PAGE:
  case R_AARCH64_ADR_PREL_PG_HI21:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    // Page-relative adrp: the immediate is the page delta (Val >> 12),
    // and the +/-4GB range needs 33 bits before the shift.
    checkInt<33>(Loc, Val, Type);
    write32AArch64Addr(Loc, Val >> 12);
    break;
  case R_AARCH64_ADR_PREL_LO21:
    checkInt<21>(Loc, Val, Type);
    write32AArch64Addr(Loc, Val);
    break;
  case R_AARCH64_CALL26:
  case R_AARCH64_JUMP26:
    checkInt<28>(Loc, Val, Type);
    or32le(Loc, (Val & 0x0FFFFFFC) >> 2);
    break;
  case R_AARCH64_CONDBR19:
    checkInt<21>(Loc, Val, Type);
    or32le(Loc, (Val & 0x1FFFFC) << 3);
    break;
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
  case R_AARCH64_TLSDESC_LD64_LO12_NC:
    // 64-bit load/store: the scaled 12-bit immediate requires an 8-byte
    // aligned target.
    checkAlignment<8>(Loc, Val, Type);
    or32le(Loc, (Val & 0xFF8) << 7);
    break;
  case R_AARCH64_LDST8_ABS_LO12_NC:
    or32AArch64Imm(Loc, getBits(Val, 0, 11));
    break;
  case R_AARCH64_LDST16_ABS_LO12_NC:
    or32AArch64Imm(Loc, getBits(Val, 1, 11));
    break;
  case R_AARCH64_LDST32_ABS_LO12_NC:
    or32AArch64Imm(Loc, getBits(Val, 2, 11));
    break;
  case R_AARCH64_LDST64_ABS_LO12_NC:
    or32AArch64Imm(Loc, getBits(Val, 3, 11));
    break;
  case R_AARCH64_LDST128_ABS_LO12_NC:
    or32AArch64Imm(Loc, getBits(Val, 4, 11));
    break;
  case R_AARCH64_MOVW_UABS_G0_NC:
    or32le(Loc, (Val & 0xFFFF) << 5);
    break;
  case R_AARCH64_MOVW_UABS_G1_NC:
    or32le(Loc, (Val & 0xFFFF0000) >> 11);
    break;
  case R_AARCH64_MOVW_UABS_G2_NC:
    or32le(Loc, (Val & 0xFFFF00000000) >> 27);
    break;
  case R_AARCH64_MOVW_UABS_G3:
    or32le(Loc, (Val & 0xFFFF000000000000) >> 43);
    break;
  case R_AARCH64_TSTBR14:
    checkInt<16>(Loc, Val, Type);
    or32le(Loc, (Val & 0xFFFC) << 3);
    break;
  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
    checkInt<24>(Loc, Val, Type);
    or32AArch64Imm(Loc, Val >> 12);
    break;
  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
  case R_AARCH64_TLSDESC_ADD_LO12_NC:
    or32AArch64Imm(Loc, Val);
    break;
  default:
    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
  }
}

void AArch64TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                       uint64_t Val) const {
  // TLSDESC Global-Dynamic relocation are in the form:
  //   adrp    x0, :tlsdesc:v             [R_AARCH64_TLSDESC_ADR_PAGE21]
  //   ldr     x1, [x0, #:tlsdesc_lo12:v  [R_AARCH64_TLSDESC_LD64_LO12_NC]
  //   add     x0, x0, :tlsdesc_los:v     [R_AARCH64_TLSDESC_ADD_LO12_NC]
  //   .tlsdesccall                       [R_AARCH64_TLSDESC_CALL]
  //   blr     x1
  // And it can be optimized to:
  //   movz    x0, #0x0, lsl #16
  //   movk    x0, #0x10
  //   nop
  //   nop
  checkUInt<32>(Loc, Val, Type);

  switch (Type) {
  case R_AARCH64_TLSDESC_ADD_LO12_NC:
  case R_AARCH64_TLSDESC_CALL:
    write32le(Loc, 0xd503201f); // nop
    return;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    write32le(Loc, 0xd2a00000 | (((Val >> 16) & 0xffff) << 5)); // movz
    return;
  case
R_AARCH64_TLSDESC_LD64_LO12_NC: 1515 write32le(Loc, 0xf2800000 | ((Val & 0xffff) << 5)); // movk 1516 return; 1517 default: 1518 llvm_unreachable("unsupported relocation for TLS GD to LE relaxation"); 1519 } 1520 } 1521 1522 void AArch64TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, 1523 uint64_t Val) const { 1524 // TLSDESC Global-Dynamic relocation are in the form: 1525 // adrp x0, :tlsdesc:v [R_AARCH64_TLSDESC_ADR_PAGE21] 1526 // ldr x1, [x0, #:tlsdesc_lo12:v [R_AARCH64_TLSDESC_LD64_LO12_NC] 1527 // add x0, x0, :tlsdesc_los:v [_AARCH64_TLSDESC_ADD_LO12_NC] 1528 // .tlsdesccall [R_AARCH64_TLSDESC_CALL] 1529 // blr x1 1530 // And it can optimized to: 1531 // adrp x0, :gottprel:v 1532 // ldr x0, [x0, :gottprel_lo12:v] 1533 // nop 1534 // nop 1535 1536 switch (Type) { 1537 case R_AARCH64_TLSDESC_ADD_LO12_NC: 1538 case R_AARCH64_TLSDESC_CALL: 1539 write32le(Loc, 0xd503201f); // nop 1540 break; 1541 case R_AARCH64_TLSDESC_ADR_PAGE21: 1542 write32le(Loc, 0x90000000); // adrp 1543 relocateOne(Loc, R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, Val); 1544 break; 1545 case R_AARCH64_TLSDESC_LD64_LO12_NC: 1546 write32le(Loc, 0xf9400000); // ldr 1547 relocateOne(Loc, R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, Val); 1548 break; 1549 default: 1550 llvm_unreachable("unsupported relocation for TLS GD to LE relaxation"); 1551 } 1552 } 1553 1554 void AArch64TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, 1555 uint64_t Val) const { 1556 checkUInt<32>(Loc, Val, Type); 1557 1558 if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) { 1559 // Generate MOVZ. 1560 uint32_t RegNo = read32le(Loc) & 0x1f; 1561 write32le(Loc, (0xd2a00000 | RegNo) | (((Val >> 16) & 0xffff) << 5)); 1562 return; 1563 } 1564 if (Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) { 1565 // Generate MOVK. 
    uint32_t RegNo = read32le(Loc) & 0x1f;
    write32le(Loc, (0xf2800000 | RegNo) | ((Val & 0xffff) << 5));
    return;
  }
  llvm_unreachable("invalid relocation for TLS IE to LE relaxation");
}

AMDGPUTargetInfo::AMDGPUTargetInfo() {
  RelativeRel = R_AMDGPU_REL64;
  GotRel = R_AMDGPU_ABS64;
  GotEntrySize = 8;
}

void AMDGPUTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  switch (Type) {
  case R_AMDGPU_ABS32:
  case R_AMDGPU_GOTPCREL:
  case R_AMDGPU_GOTPCREL32_LO:
  case R_AMDGPU_REL32:
  case R_AMDGPU_REL32_LO:
    write32le(Loc, Val);
    break;
  case R_AMDGPU_ABS64:
    write64le(Loc, Val);
    break;
  case R_AMDGPU_GOTPCREL32_HI:
  case R_AMDGPU_REL32_HI:
    write32le(Loc, Val >> 32);
    break;
  default:
    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
  }
}

RelExpr AMDGPUTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
  switch (Type) {
  case R_AMDGPU_ABS32:
  case R_AMDGPU_ABS64:
    return R_ABS;
  case R_AMDGPU_REL32:
  case R_AMDGPU_REL32_LO:
  case R_AMDGPU_REL32_HI:
    return R_PC;
  case R_AMDGPU_GOTPCREL:
  case R_AMDGPU_GOTPCREL32_LO:
  case R_AMDGPU_GOTPCREL32_HI:
    return R_GOT_PC;
  default:
    fatal("do not know how to handle relocation " + Twine(Type));
  }
}

ARMTargetInfo::ARMTargetInfo() {
  CopyRel = R_ARM_COPY;
  RelativeRel = R_ARM_RELATIVE;
  IRelativeRel = R_ARM_IRELATIVE;
  GotRel = R_ARM_GLOB_DAT;
  PltRel = R_ARM_JUMP_SLOT;
  TlsGotRel = R_ARM_TLS_TPOFF32;
  TlsModuleIndexRel = R_ARM_TLS_DTPMOD32;
  TlsOffsetRel = R_ARM_TLS_DTPOFF32;
  GotEntrySize = 4;
  GotPltEntrySize = 4;
  PltEntrySize = 16;
  PltHeaderSize = 20;
  // ARM uses Variant 1 TLS
  TcbSize = 8;
  NeedsThunks = true;
}

RelExpr ARMTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
  switch (Type) {
  default:
    return R_ABS;
  case R_ARM_THM_JUMP11:
    return R_PC;
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_PREL31:
  case R_ARM_THM_JUMP19:
  case R_ARM_THM_JUMP24:
  case R_ARM_THM_CALL:
    return R_PLT_PC;
  case R_ARM_GOTOFF32:
    // (S + A) - GOT_ORG
    return R_GOTREL;
  case R_ARM_GOT_BREL:
    // GOT(S) + A - GOT_ORG
    return R_GOT_OFF;
  case R_ARM_GOT_PREL:
  case R_ARM_TLS_IE32:
    // GOT(S) + A - P
    return R_GOT_PC;
  case R_ARM_TARGET1:
    return Config->Target1Rel ? R_PC : R_ABS;
  case R_ARM_TARGET2:
    if (Config->Target2 == Target2Policy::Rel)
      return R_PC;
    if (Config->Target2 == Target2Policy::Abs)
      return R_ABS;
    return R_GOT_PC;
  case R_ARM_TLS_GD32:
    return R_TLSGD_PC;
  case R_ARM_TLS_LDM32:
    return R_TLSLD_PC;
  case R_ARM_BASE_PREL:
    // B(S) + A - P
    // FIXME: currently B(S) assumed to be .got, this may not hold for all
    // platforms.
    return R_GOTONLY_PC;
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVT_PREL:
  case R_ARM_REL32:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVT_PREL:
    return R_PC;
  case R_ARM_NONE:
    return R_HINT;
  case R_ARM_TLS_LE32:
    return R_TLS;
  }
}

bool ARMTargetInfo::isPicRel(uint32_t Type) const {
  return (Type == R_ARM_TARGET1 && !Config->Target1Rel) ||
         (Type == R_ARM_ABS32);
}

uint32_t ARMTargetInfo::getDynRel(uint32_t Type) const {
  if (Type == R_ARM_TARGET1 && !Config->Target1Rel)
    return R_ARM_ABS32;
  if (Type == R_ARM_ABS32)
    return Type;
  // Keep it going with a dummy value so that we can find more reloc errors.
  return R_ARM_ABS32;
}

void ARMTargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
  write32le(Buf, In<ELF32LE>::Plt->getVA());
}

void ARMTargetInfo::writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const {
  // An ARM entry is the address of the ifunc resolver function.
  write32le(Buf, S.getVA<ELF32LE>());
}

void ARMTargetInfo::writePltHeader(uint8_t *Buf) const {
  const uint8_t PltData[] = {
      0x04, 0xe0, 0x2d, 0xe5, //     str lr, [sp,#-4]!
      0x04, 0xe0, 0x9f, 0xe5, //     ldr lr, L2
      0x0e, 0xe0, 0x8f, 0xe0, // L1: add lr, pc, lr
      0x08, 0xf0, 0xbe, 0xe5, //     ldr pc, [lr, #8]
      0x00, 0x00, 0x00, 0x00, // L2: .word &(.got.plt) - L1 - 8
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint64_t GotPlt = In<ELF32LE>::GotPlt->getVA();
  uint64_t L1 = In<ELF32LE>::Plt->getVA() + 8;
  // The -8 compensates for the ARM pipeline: pc reads as instruction + 8.
  write32le(Buf + 16, GotPlt - L1 - 8);
}

void ARMTargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                             uint64_t PltEntryAddr, int32_t Index,
                             unsigned RelOff) const {
  // FIXME: Using simple code sequence with simple relocations.
  // There is a more optimal sequence but it requires support for the group
  // relocations. See ELF for the ARM Architecture Appendix A.3
  const uint8_t PltData[] = {
      0x04, 0xc0, 0x9f, 0xe5, //     ldr ip, L2
      0x0f, 0xc0, 0x8c, 0xe0, // L1: add ip, ip, pc
      0x00, 0xf0, 0x9c, 0xe5, //     ldr pc, [ip]
      0x00, 0x00, 0x00, 0x00, // L2: .word Offset(&(.plt.got) - L1 - 8
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint64_t L1 = PltEntryAddr + 4;
  write32le(Buf + 12, GotEntryAddr - L1 - 8);
}

RelExpr ARMTargetInfo::getThunkExpr(RelExpr Expr, uint32_t RelocType,
                                    const InputFile &File,
                                    const SymbolBody &S) const {
  // If S is an undefined weak symbol in an executable we don't need a Thunk.
1750 // In a DSO calls to undefined symbols, including weak ones get PLT entries 1751 // which may need a thunk. 1752 if (S.isUndefined() && !S.isLocal() && S.symbol()->isWeak() 1753 && !Config->Shared) 1754 return Expr; 1755 // A state change from ARM to Thumb and vice versa must go through an 1756 // interworking thunk if the relocation type is not R_ARM_CALL or 1757 // R_ARM_THM_CALL. 1758 switch (RelocType) { 1759 case R_ARM_PC24: 1760 case R_ARM_PLT32: 1761 case R_ARM_JUMP24: 1762 // Source is ARM, all PLT entries are ARM so no interworking required. 1763 // Otherwise we need to interwork if Symbol has bit 0 set (Thumb). 1764 if (Expr == R_PC && ((S.getVA<ELF32LE>() & 1) == 1)) 1765 return R_THUNK_PC; 1766 break; 1767 case R_ARM_THM_JUMP19: 1768 case R_ARM_THM_JUMP24: 1769 // Source is Thumb, all PLT entries are ARM so interworking is required. 1770 // Otherwise we need to interwork if Symbol has bit 0 clear (ARM). 1771 if (Expr == R_PLT_PC) 1772 return R_THUNK_PLT_PC; 1773 if ((S.getVA<ELF32LE>() & 1) == 0) 1774 return R_THUNK_PC; 1775 break; 1776 } 1777 return Expr; 1778 } 1779 1780 void ARMTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type, 1781 uint64_t Val) const { 1782 switch (Type) { 1783 case R_ARM_ABS32: 1784 case R_ARM_BASE_PREL: 1785 case R_ARM_GLOB_DAT: 1786 case R_ARM_GOTOFF32: 1787 case R_ARM_GOT_BREL: 1788 case R_ARM_GOT_PREL: 1789 case R_ARM_REL32: 1790 case R_ARM_RELATIVE: 1791 case R_ARM_TARGET1: 1792 case R_ARM_TARGET2: 1793 case R_ARM_TLS_GD32: 1794 case R_ARM_TLS_IE32: 1795 case R_ARM_TLS_LDM32: 1796 case R_ARM_TLS_LDO32: 1797 case R_ARM_TLS_LE32: 1798 case R_ARM_TLS_TPOFF32: 1799 write32le(Loc, Val); 1800 break; 1801 case R_ARM_TLS_DTPMOD32: 1802 write32le(Loc, 1); 1803 break; 1804 case R_ARM_PREL31: 1805 checkInt<31>(Loc, Val, Type); 1806 write32le(Loc, (read32le(Loc) & 0x80000000) | (Val & ~0x80000000)); 1807 break; 1808 case R_ARM_CALL: 1809 // R_ARM_CALL is used for BL and BLX instructions, depending on the 1810 // value of bit 0 of 
Val, we must select a BL or BLX instruction 1811 if (Val & 1) { 1812 // If bit 0 of Val is 1 the target is Thumb, we must select a BLX. 1813 // The BLX encoding is 0xfa:H:imm24 where Val = imm24:H:'1' 1814 checkInt<26>(Loc, Val, Type); 1815 write32le(Loc, 0xfa000000 | // opcode 1816 ((Val & 2) << 23) | // H 1817 ((Val >> 2) & 0x00ffffff)); // imm24 1818 break; 1819 } 1820 if ((read32le(Loc) & 0xfe000000) == 0xfa000000) 1821 // BLX (always unconditional) instruction to an ARM Target, select an 1822 // unconditional BL. 1823 write32le(Loc, 0xeb000000 | (read32le(Loc) & 0x00ffffff)); 1824 // fall through as BL encoding is shared with B 1825 case R_ARM_JUMP24: 1826 case R_ARM_PC24: 1827 case R_ARM_PLT32: 1828 checkInt<26>(Loc, Val, Type); 1829 write32le(Loc, (read32le(Loc) & ~0x00ffffff) | ((Val >> 2) & 0x00ffffff)); 1830 break; 1831 case R_ARM_THM_JUMP11: 1832 checkInt<12>(Loc, Val, Type); 1833 write16le(Loc, (read32le(Loc) & 0xf800) | ((Val >> 1) & 0x07ff)); 1834 break; 1835 case R_ARM_THM_JUMP19: 1836 // Encoding T3: Val = S:J2:J1:imm6:imm11:0 1837 checkInt<21>(Loc, Val, Type); 1838 write16le(Loc, 1839 (read16le(Loc) & 0xfbc0) | // opcode cond 1840 ((Val >> 10) & 0x0400) | // S 1841 ((Val >> 12) & 0x003f)); // imm6 1842 write16le(Loc + 2, 1843 0x8000 | // opcode 1844 ((Val >> 8) & 0x0800) | // J2 1845 ((Val >> 5) & 0x2000) | // J1 1846 ((Val >> 1) & 0x07ff)); // imm11 1847 break; 1848 case R_ARM_THM_CALL: 1849 // R_ARM_THM_CALL is used for BL and BLX instructions, depending on the 1850 // value of bit 0 of Val, we must select a BL or BLX instruction 1851 if ((Val & 1) == 0) { 1852 // Ensure BLX destination is 4-byte aligned. As BLX instruction may 1853 // only be two byte aligned. 
This must be done before overflow check 1854 Val = alignTo(Val, 4); 1855 } 1856 // Bit 12 is 0 for BLX, 1 for BL 1857 write16le(Loc + 2, (read16le(Loc + 2) & ~0x1000) | (Val & 1) << 12); 1858 // Fall through as rest of encoding is the same as B.W 1859 case R_ARM_THM_JUMP24: 1860 // Encoding B T4, BL T1, BLX T2: Val = S:I1:I2:imm10:imm11:0 1861 // FIXME: Use of I1 and I2 require v6T2ops 1862 checkInt<25>(Loc, Val, Type); 1863 write16le(Loc, 1864 0xf000 | // opcode 1865 ((Val >> 14) & 0x0400) | // S 1866 ((Val >> 12) & 0x03ff)); // imm10 1867 write16le(Loc + 2, 1868 (read16le(Loc + 2) & 0xd000) | // opcode 1869 (((~(Val >> 10)) ^ (Val >> 11)) & 0x2000) | // J1 1870 (((~(Val >> 11)) ^ (Val >> 13)) & 0x0800) | // J2 1871 ((Val >> 1) & 0x07ff)); // imm11 1872 break; 1873 case R_ARM_MOVW_ABS_NC: 1874 case R_ARM_MOVW_PREL_NC: 1875 write32le(Loc, (read32le(Loc) & ~0x000f0fff) | ((Val & 0xf000) << 4) | 1876 (Val & 0x0fff)); 1877 break; 1878 case R_ARM_MOVT_ABS: 1879 case R_ARM_MOVT_PREL: 1880 checkInt<32>(Loc, Val, Type); 1881 write32le(Loc, (read32le(Loc) & ~0x000f0fff) | 1882 (((Val >> 16) & 0xf000) << 4) | ((Val >> 16) & 0xfff)); 1883 break; 1884 case R_ARM_THM_MOVT_ABS: 1885 case R_ARM_THM_MOVT_PREL: 1886 // Encoding T1: A = imm4:i:imm3:imm8 1887 checkInt<32>(Loc, Val, Type); 1888 write16le(Loc, 1889 0xf2c0 | // opcode 1890 ((Val >> 17) & 0x0400) | // i 1891 ((Val >> 28) & 0x000f)); // imm4 1892 write16le(Loc + 2, 1893 (read16le(Loc + 2) & 0x8f00) | // opcode 1894 ((Val >> 12) & 0x7000) | // imm3 1895 ((Val >> 16) & 0x00ff)); // imm8 1896 break; 1897 case R_ARM_THM_MOVW_ABS_NC: 1898 case R_ARM_THM_MOVW_PREL_NC: 1899 // Encoding T3: A = imm4:i:imm3:imm8 1900 write16le(Loc, 1901 0xf240 | // opcode 1902 ((Val >> 1) & 0x0400) | // i 1903 ((Val >> 12) & 0x000f)); // imm4 1904 write16le(Loc + 2, 1905 (read16le(Loc + 2) & 0x8f00) | // opcode 1906 ((Val << 4) & 0x7000) | // imm3 1907 (Val & 0x00ff)); // imm8 1908 break; 1909 default: 1910 error(getErrorLocation(Loc) + 
"unrecognized reloc " + Twine(Type)); 1911 } 1912 } 1913 1914 uint64_t ARMTargetInfo::getImplicitAddend(const uint8_t *Buf, 1915 uint32_t Type) const { 1916 switch (Type) { 1917 default: 1918 return 0; 1919 case R_ARM_ABS32: 1920 case R_ARM_BASE_PREL: 1921 case R_ARM_GOTOFF32: 1922 case R_ARM_GOT_BREL: 1923 case R_ARM_GOT_PREL: 1924 case R_ARM_REL32: 1925 case R_ARM_TARGET1: 1926 case R_ARM_TARGET2: 1927 case R_ARM_TLS_GD32: 1928 case R_ARM_TLS_LDM32: 1929 case R_ARM_TLS_LDO32: 1930 case R_ARM_TLS_IE32: 1931 case R_ARM_TLS_LE32: 1932 return SignExtend64<32>(read32le(Buf)); 1933 case R_ARM_PREL31: 1934 return SignExtend64<31>(read32le(Buf)); 1935 case R_ARM_CALL: 1936 case R_ARM_JUMP24: 1937 case R_ARM_PC24: 1938 case R_ARM_PLT32: 1939 return SignExtend64<26>(read32le(Buf) << 2); 1940 case R_ARM_THM_JUMP11: 1941 return SignExtend64<12>(read16le(Buf) << 1); 1942 case R_ARM_THM_JUMP19: { 1943 // Encoding T3: A = S:J2:J1:imm10:imm6:0 1944 uint16_t Hi = read16le(Buf); 1945 uint16_t Lo = read16le(Buf + 2); 1946 return SignExtend64<20>(((Hi & 0x0400) << 10) | // S 1947 ((Lo & 0x0800) << 8) | // J2 1948 ((Lo & 0x2000) << 5) | // J1 1949 ((Hi & 0x003f) << 12) | // imm6 1950 ((Lo & 0x07ff) << 1)); // imm11:0 1951 } 1952 case R_ARM_THM_CALL: 1953 case R_ARM_THM_JUMP24: { 1954 // Encoding B T4, BL T1, BLX T2: A = S:I1:I2:imm10:imm11:0 1955 // I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S) 1956 // FIXME: I1 and I2 require v6T2ops 1957 uint16_t Hi = read16le(Buf); 1958 uint16_t Lo = read16le(Buf + 2); 1959 return SignExtend64<24>(((Hi & 0x0400) << 14) | // S 1960 (~((Lo ^ (Hi << 3)) << 10) & 0x00800000) | // I1 1961 (~((Lo ^ (Hi << 1)) << 11) & 0x00400000) | // I2 1962 ((Hi & 0x003ff) << 12) | // imm0 1963 ((Lo & 0x007ff) << 1)); // imm11:0 1964 } 1965 // ELF for the ARM Architecture 4.6.1.1 the implicit addend for MOVW and 1966 // MOVT is in the range -32768 <= A < 32768 1967 case R_ARM_MOVW_ABS_NC: 1968 case R_ARM_MOVT_ABS: 1969 case R_ARM_MOVW_PREL_NC: 1970 case R_ARM_MOVT_PREL: { 
1971 uint64_t Val = read32le(Buf) & 0x000f0fff; 1972 return SignExtend64<16>(((Val & 0x000f0000) >> 4) | (Val & 0x00fff)); 1973 } 1974 case R_ARM_THM_MOVW_ABS_NC: 1975 case R_ARM_THM_MOVT_ABS: 1976 case R_ARM_THM_MOVW_PREL_NC: 1977 case R_ARM_THM_MOVT_PREL: { 1978 // Encoding T3: A = imm4:i:imm3:imm8 1979 uint16_t Hi = read16le(Buf); 1980 uint16_t Lo = read16le(Buf + 2); 1981 return SignExtend64<16>(((Hi & 0x000f) << 12) | // imm4 1982 ((Hi & 0x0400) << 1) | // i 1983 ((Lo & 0x7000) >> 4) | // imm3 1984 (Lo & 0x00ff)); // imm8 1985 } 1986 } 1987 } 1988 1989 bool ARMTargetInfo::isTlsLocalDynamicRel(uint32_t Type) const { 1990 return Type == R_ARM_TLS_LDO32 || Type == R_ARM_TLS_LDM32; 1991 } 1992 1993 bool ARMTargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const { 1994 return Type == R_ARM_TLS_GD32; 1995 } 1996 1997 bool ARMTargetInfo::isTlsInitialExecRel(uint32_t Type) const { 1998 return Type == R_ARM_TLS_IE32; 1999 } 2000 2001 template <class ELFT> MipsTargetInfo<ELFT>::MipsTargetInfo() { 2002 GotPltHeaderEntriesNum = 2; 2003 DefaultMaxPageSize = 65536; 2004 GotEntrySize = sizeof(typename ELFT::uint); 2005 GotPltEntrySize = sizeof(typename ELFT::uint); 2006 PltEntrySize = 16; 2007 PltHeaderSize = 32; 2008 CopyRel = R_MIPS_COPY; 2009 PltRel = R_MIPS_JUMP_SLOT; 2010 NeedsThunks = true; 2011 if (ELFT::Is64Bits) { 2012 RelativeRel = (R_MIPS_64 << 8) | R_MIPS_REL32; 2013 TlsGotRel = R_MIPS_TLS_TPREL64; 2014 TlsModuleIndexRel = R_MIPS_TLS_DTPMOD64; 2015 TlsOffsetRel = R_MIPS_TLS_DTPREL64; 2016 } else { 2017 RelativeRel = R_MIPS_REL32; 2018 TlsGotRel = R_MIPS_TLS_TPREL32; 2019 TlsModuleIndexRel = R_MIPS_TLS_DTPMOD32; 2020 TlsOffsetRel = R_MIPS_TLS_DTPREL32; 2021 } 2022 } 2023 2024 template <class ELFT> 2025 RelExpr MipsTargetInfo<ELFT>::getRelExpr(uint32_t Type, 2026 const SymbolBody &S) const { 2027 // See comment in the calculateMipsRelChain. 
2028 if (ELFT::Is64Bits || Config->MipsN32Abi) 2029 Type &= 0xff; 2030 switch (Type) { 2031 default: 2032 return R_ABS; 2033 case R_MIPS_JALR: 2034 return R_HINT; 2035 case R_MIPS_GPREL16: 2036 case R_MIPS_GPREL32: 2037 return R_MIPS_GOTREL; 2038 case R_MIPS_26: 2039 return R_PLT; 2040 case R_MIPS_HI16: 2041 case R_MIPS_LO16: 2042 case R_MIPS_GOT_OFST: 2043 // R_MIPS_HI16/R_MIPS_LO16 relocations against _gp_disp calculate 2044 // offset between start of function and 'gp' value which by default 2045 // equal to the start of .got section. In that case we consider these 2046 // relocations as relative. 2047 if (&S == ElfSym<ELFT>::MipsGpDisp) 2048 return R_PC; 2049 return R_ABS; 2050 case R_MIPS_PC32: 2051 case R_MIPS_PC16: 2052 case R_MIPS_PC19_S2: 2053 case R_MIPS_PC21_S2: 2054 case R_MIPS_PC26_S2: 2055 case R_MIPS_PCHI16: 2056 case R_MIPS_PCLO16: 2057 return R_PC; 2058 case R_MIPS_GOT16: 2059 if (S.isLocal()) 2060 return R_MIPS_GOT_LOCAL_PAGE; 2061 // fallthrough 2062 case R_MIPS_CALL16: 2063 case R_MIPS_GOT_DISP: 2064 case R_MIPS_TLS_GOTTPREL: 2065 return R_MIPS_GOT_OFF; 2066 case R_MIPS_CALL_HI16: 2067 case R_MIPS_CALL_LO16: 2068 case R_MIPS_GOT_HI16: 2069 case R_MIPS_GOT_LO16: 2070 return R_MIPS_GOT_OFF32; 2071 case R_MIPS_GOT_PAGE: 2072 return R_MIPS_GOT_LOCAL_PAGE; 2073 case R_MIPS_TLS_GD: 2074 return R_MIPS_TLSGD; 2075 case R_MIPS_TLS_LDM: 2076 return R_MIPS_TLSLD; 2077 } 2078 } 2079 2080 template <class ELFT> bool MipsTargetInfo<ELFT>::isPicRel(uint32_t Type) const { 2081 return Type == R_MIPS_32 || Type == R_MIPS_64; 2082 } 2083 2084 template <class ELFT> 2085 uint32_t MipsTargetInfo<ELFT>::getDynRel(uint32_t Type) const { 2086 return RelativeRel; 2087 } 2088 2089 template <class ELFT> 2090 bool MipsTargetInfo<ELFT>::isTlsLocalDynamicRel(uint32_t Type) const { 2091 return Type == R_MIPS_TLS_LDM; 2092 } 2093 2094 template <class ELFT> 2095 bool MipsTargetInfo<ELFT>::isTlsGlobalDynamicRel(uint32_t Type) const { 2096 return Type == R_MIPS_TLS_GD; 2097 } 2098 
template <class ELFT>
void MipsTargetInfo<ELFT>::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
  // Each .got.plt entry initially points at the PLT header so that lazy
  // resolution goes through it on first call.
  write32<ELFT::TargetEndianness>(Buf, In<ELFT>::Plt->getVA());
}

// Reads a BSIZE-bit field from the instruction at Loc and scales it by
// 2^SHIFT, sign-extending the result.
template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
static int64_t getPcRelocAddend(const uint8_t *Loc) {
  uint32_t Instr = read32<E>(Loc);
  uint32_t Mask = 0xffffffff >> (32 - BSIZE);
  return SignExtend64<BSIZE + SHIFT>((Instr & Mask) << SHIFT);
}

// Writes a PC-relative value V into a BSIZE-bit instruction field at Loc,
// checking alignment (when SHIFT > 0) and signed range first.
template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
static void applyMipsPcReloc(uint8_t *Loc, uint32_t Type, uint64_t V) {
  uint32_t Mask = 0xffffffff >> (32 - BSIZE);
  uint32_t Instr = read32<E>(Loc);
  if (SHIFT > 0)
    checkAlignment<(1 << SHIFT)>(Loc, V, Type);
  checkInt<BSIZE + SHIFT>(Loc, V, Type);
  write32<E>(Loc, (Instr & ~Mask) | ((V >> SHIFT) & Mask));
}

// Writes %hi(V) into the low 16 bits of the instruction at Loc. The +0x8000
// rounds so that a following %lo (sign-extended) reconstructs V.
template <endianness E> static void writeMipsHi16(uint8_t *Loc, uint64_t V) {
  uint32_t Instr = read32<E>(Loc);
  uint16_t Res = ((V + 0x8000) >> 16) & 0xffff;
  write32<E>(Loc, (Instr & 0xffff0000) | Res);
}

// Writes %higher(V) (bits 47:32, rounded) into the instruction at Loc.
template <endianness E> static void writeMipsHigher(uint8_t *Loc, uint64_t V) {
  uint32_t Instr = read32<E>(Loc);
  uint16_t Res = ((V + 0x80008000) >> 32) & 0xffff;
  write32<E>(Loc, (Instr & 0xffff0000) | Res);
}

// Writes %highest(V) (bits 63:48, rounded) into the instruction at Loc.
template <endianness E> static void writeMipsHighest(uint8_t *Loc, uint64_t V) {
  uint32_t Instr = read32<E>(Loc);
  uint16_t Res = ((V + 0x800080008000) >> 48) & 0xffff;
  write32<E>(Loc, (Instr & 0xffff0000) | Res);
}

// Writes %lo(V) (low 16 bits) into the instruction at Loc.
template <endianness E> static void writeMipsLo16(uint8_t *Loc, uint64_t V) {
  uint32_t Instr = read32<E>(Loc);
  write32<E>(Loc, (Instr & 0xffff0000) | (V & 0xffff));
}

// Returns true if the output targets MIPS R6, judged from the e_flags of
// the first input object file.
template <class ELFT> static bool isMipsR6() {
  const auto &FirstObj = cast<ELFFileBase<ELFT>>(*Config->FirstElf);
  uint32_t Arch = FirstObj.getObj().getHeader()->e_flags & EF_MIPS_ARCH;
  return Arch == EF_MIPS_ARCH_32R6 || Arch == EF_MIPS_ARCH_64R6;
}

template <class ELFT>
void MipsTargetInfo<ELFT>::writePltHeader(uint8_t *Buf) const {
  const endianness E = ELFT::TargetEndianness;
  // N32 uses $14 as the scratch register; O32/N64 use $28 ($gp).
  if (Config->MipsN32Abi) {
    write32<E>(Buf, 0x3c0e0000);      // lui   $14, %hi(&GOTPLT[0])
    write32<E>(Buf + 4, 0x8dd90000);  // lw    $25, %lo(&GOTPLT[0])($14)
    write32<E>(Buf + 8, 0x25ce0000);  // addiu $14, $14, %lo(&GOTPLT[0])
    write32<E>(Buf + 12, 0x030ec023); // subu  $24, $24, $14
  } else {
    write32<E>(Buf, 0x3c1c0000);      // lui   $28, %hi(&GOTPLT[0])
    write32<E>(Buf + 4, 0x8f990000);  // lw    $25, %lo(&GOTPLT[0])($28)
    write32<E>(Buf + 8, 0x279c0000);  // addiu $28, $28, %lo(&GOTPLT[0])
    write32<E>(Buf + 12, 0x031cc023); // subu  $24, $24, $28
  }
  write32<E>(Buf + 16, 0x03e07825); // move  $15, $31
  write32<E>(Buf + 20, 0x0018c082); // srl   $24, $24, 2
  write32<E>(Buf + 24, 0x0320f809); // jalr  $25
  write32<E>(Buf + 28, 0x2718fffe); // subu  $24, $24, 2
  // Patch the %hi/%lo fields above with the real .got.plt address.
  uint64_t Got = In<ELFT>::GotPlt->getVA();
  writeMipsHi16<E>(Buf, Got);
  writeMipsLo16<E>(Buf + 4, Got);
  writeMipsLo16<E>(Buf + 8, Got);
}

template <class ELFT>
void MipsTargetInfo<ELFT>::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                    uint64_t PltEntryAddr, int32_t Index,
                                    unsigned RelOff) const {
  const endianness E = ELFT::TargetEndianness;
  write32<E>(Buf, 0x3c0f0000);     // lui   $15, %hi(.got.plt entry)
  write32<E>(Buf + 4, 0x8df90000); // l[wd] $25, %lo(.got.plt entry)($15)
  // jr $25 — R6 uses a different encoding for jr.
  write32<E>(Buf + 8, isMipsR6<ELFT>() ? 0x03200009 : 0x03200008);
  write32<E>(Buf + 12, 0x25f80000); // addiu $24, $15, %lo(.got.plt entry)
  writeMipsHi16<E>(Buf, GotEntryAddr);
  writeMipsLo16<E>(Buf + 4, GotEntryAddr);
  writeMipsLo16<E>(Buf + 12, GotEntryAddr);
}

template <class ELFT>
RelExpr MipsTargetInfo<ELFT>::getThunkExpr(RelExpr Expr, uint32_t Type,
                                           const InputFile &File,
                                           const SymbolBody &S) const {
  // Any MIPS PIC code function is invoked with its address in register $t9.
  // So if we have a branch instruction from non-PIC code to the PIC one
  // we cannot make the jump directly and need to create a small stub
  // to save the target function address.
  // See page 3-38 ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
  if (Type != R_MIPS_26)
    return Expr;
  auto *F = dyn_cast<ELFFileBase<ELFT>>(&File);
  if (!F)
    return Expr;
  // If the current file has PIC code, an LA25 stub is not required.
  if (F->getObj().getHeader()->e_flags & EF_MIPS_PIC)
    return Expr;
  auto *D = dyn_cast<DefinedRegular<ELFT>>(&S);
  // LA25 is required if the target file has PIC code
  // or the target symbol is a PIC symbol.
  return D && D->isMipsPIC() ? R_THUNK_ABS : Expr;
}

template <class ELFT>
uint64_t MipsTargetInfo<ELFT>::getImplicitAddend(const uint8_t *Buf,
                                                 uint32_t Type) const {
  const endianness E = ELFT::TargetEndianness;
  switch (Type) {
  default:
    return 0;
  case R_MIPS_32:
  case R_MIPS_GPREL32:
  case R_MIPS_TLS_DTPREL32:
  case R_MIPS_TLS_TPREL32:
    return read32<E>(Buf);
  case R_MIPS_26:
    // FIXME (simon): If the relocation target symbol is not a PLT entry
    // we should use another expression for calculation:
    // ((A << 2) | (P & 0xf0000000)) >> 2
    return SignExtend64<28>((read32<E>(Buf) & 0x3ffffff) << 2);
  case R_MIPS_GPREL16:
  case R_MIPS_LO16:
  case R_MIPS_PCLO16:
  case R_MIPS_TLS_DTPREL_HI16:
  case R_MIPS_TLS_DTPREL_LO16:
  case R_MIPS_TLS_TPREL_HI16:
  case R_MIPS_TLS_TPREL_LO16:
    return SignExtend64<16>(read32<E>(Buf));
  case R_MIPS_PC16:
    return getPcRelocAddend<E, 16, 2>(Buf);
  case R_MIPS_PC19_S2:
    return getPcRelocAddend<E, 19, 2>(Buf);
  case R_MIPS_PC21_S2:
    return getPcRelocAddend<E, 21, 2>(Buf);
  case R_MIPS_PC26_S2:
    return getPcRelocAddend<E, 26, 2>(Buf);
  case R_MIPS_PC32:
    return getPcRelocAddend<E, 32, 0>(Buf);
  }
}

// Decodes an N64-style packed relocation record into the single effective
// relocation type (and possibly negated value) that relocateOne applies.
static std::pair<uint32_t, uint64_t>
calculateMipsRelChain(uint8_t *Loc, uint32_t Type, uint64_t Val) {
  // The MIPS N64 ABI packs multiple relocations into a single relocation
  // record. In general, all up to three relocations can have arbitrary
  // types. In fact, Clang and GCC use only a few combinations. For now,
  // we support two of them. That is enough to pass at least all LLVM
  // test suite cases:
  //   <any relocation> / R_MIPS_SUB / R_MIPS_HI16 | R_MIPS_LO16
  //   <any relocation> / R_MIPS_64 / R_MIPS_NONE
  // The first relocation is a 'real' relocation which is calculated
  // using the corresponding symbol's value. The second and the third
  // relocations are used to modify the result of the first one: extend it
  // to 64-bit, extract the high or low part, etc. For details, see part
  // 2.9 Relocation at https://dmz-portal.mips.com/mw/images/8/82/007-4658-001.pdf
  uint32_t Type2 = (Type >> 8) & 0xff;
  uint32_t Type3 = (Type >> 16) & 0xff;
  if (Type2 == R_MIPS_NONE && Type3 == R_MIPS_NONE)
    return std::make_pair(Type, Val);
  if (Type2 == R_MIPS_64 && Type3 == R_MIPS_NONE)
    return std::make_pair(Type2, Val);
  if (Type2 == R_MIPS_SUB && (Type3 == R_MIPS_HI16 || Type3 == R_MIPS_LO16))
    return std::make_pair(Type3, -Val);
  error(getErrorLocation(Loc) + "unsupported relocations combination " +
        Twine(Type));
  return std::make_pair(Type & 0xff, Val);
}

template <class ELFT>
void MipsTargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint32_t Type,
                                       uint64_t Val) const {
  const endianness E = ELFT::TargetEndianness;
  // Thread pointer and DRP offsets from the start of TLS data area.
  // https://www.linux-mips.org/wiki/NPTL
  if (Type == R_MIPS_TLS_DTPREL_HI16 || Type == R_MIPS_TLS_DTPREL_LO16 ||
      Type == R_MIPS_TLS_DTPREL32 || Type == R_MIPS_TLS_DTPREL64)
    Val -= 0x8000;
  else if (Type == R_MIPS_TLS_TPREL_HI16 || Type == R_MIPS_TLS_TPREL_LO16 ||
           Type == R_MIPS_TLS_TPREL32 || Type == R_MIPS_TLS_TPREL64)
    Val -= 0x7000;
  // Unpack N64/N32 packed relocation records; see calculateMipsRelChain.
  if (ELFT::Is64Bits || Config->MipsN32Abi)
    std::tie(Type, Val) = calculateMipsRelChain(Loc, Type, Val);
  switch (Type) {
  case R_MIPS_32:
  case R_MIPS_GPREL32:
  case R_MIPS_TLS_DTPREL32:
  case R_MIPS_TLS_TPREL32:
    write32<E>(Loc, Val);
    break;
  case R_MIPS_64:
  case R_MIPS_TLS_DTPREL64:
  case R_MIPS_TLS_TPREL64:
    write64<E>(Loc, Val);
    break;
  case R_MIPS_26:
    write32<E>(Loc, (read32<E>(Loc) & ~0x3ffffff) | ((Val >> 2) & 0x3ffffff));
    break;
  // The following group overflow-checks Val before falling through to the
  // shared %lo写 below; the second group writes %lo without a range check.
  case R_MIPS_GOT_DISP:
  case R_MIPS_GOT_PAGE:
  case R_MIPS_GOT16:
  case R_MIPS_GPREL16:
  case R_MIPS_TLS_GD:
  case R_MIPS_TLS_LDM:
    checkInt<16>(Loc, Val, Type);
    // fallthrough
  case R_MIPS_CALL16:
  case R_MIPS_CALL_LO16:
  case R_MIPS_GOT_LO16:
  case R_MIPS_GOT_OFST:
  case R_MIPS_LO16:
  case R_MIPS_PCLO16:
  case R_MIPS_TLS_DTPREL_LO16:
  case R_MIPS_TLS_GOTTPREL:
  case R_MIPS_TLS_TPREL_LO16:
    writeMipsLo16<E>(Loc, Val);
    break;
  case R_MIPS_CALL_HI16:
  case R_MIPS_GOT_HI16:
  case R_MIPS_HI16:
  case R_MIPS_PCHI16:
  case R_MIPS_TLS_DTPREL_HI16:
  case R_MIPS_TLS_TPREL_HI16:
    writeMipsHi16<E>(Loc, Val);
    break;
  case R_MIPS_HIGHER:
    writeMipsHigher<E>(Loc, Val);
    break;
  case R_MIPS_HIGHEST:
    writeMipsHighest<E>(Loc, Val);
    break;
  case R_MIPS_JALR:
    // Ignore this optimization relocation for now
    break;
  case R_MIPS_PC16:
    applyMipsPcReloc<E, 16, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC19_S2:
    applyMipsPcReloc<E, 19, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC21_S2:
    applyMipsPcReloc<E, 21, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC26_S2:
    applyMipsPcReloc<E, 26, 2>(Loc, Type, Val);
    break;
  case R_MIPS_PC32:
    applyMipsPcReloc<E, 32, 0>(Loc, Type, Val);
    break;
  default:
    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
  }
}

template <class ELFT>
bool MipsTargetInfo<ELFT>::usesOnlyLowPageBits(uint32_t Type) const {
  return Type == R_MIPS_LO16 || Type == R_MIPS_GOT_OFST;
}
} // namespace elf
} // namespace lld