//===- Target.cpp ---------------------------------------------------------===//
//
// The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Machine-specific things, such as applying relocations, creation of
// GOT or PLT entries, etc., are handled in this file.
//
// Refer to the ELF spec for the single letter variables, S, A or P, used
// in this file.
//
// Some functions defined in this file have "relaxTls" as part of their names.
// They do peephole optimization for TLS variables by rewriting instructions.
// They are not part of the ABI but optional optimization, so you can skip
// them if you are not interested in how TLS variables are optimized.
// See the following paper for the details.
//
// Ulrich Drepper, ELF Handling For Thread-Local Storage
// http://www.akkadia.org/drepper/tls.pdf
//
//===----------------------------------------------------------------------===//

#include "Target.h"
#include "Error.h"
#include "InputFiles.h"
#include "OutputSections.h"
#include "Symbols.h"
#include "Thunks.h"

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Object/ELF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ELF.h"

using namespace llvm;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::ELF;

namespace lld {
namespace elf {

// The singleton target descriptor, set once by createTarget().
TargetInfo *Target;

// OR a 32-bit value into the existing little-endian word at P.
static void or32le(uint8_t *P, int32_t V) { write32le(P, read32le(P) | V); }

// Returns the printable name of a relocation type for diagnostics.
StringRef getRelName(uint32_t Type) {
  return getELFRelocationTypeName(Config->EMachine, Type);
}

// Reports an error if V does not fit in a signed N-bit field.
template <unsigned N> static void checkInt(int64_t V, uint32_t Type) {
  if (!isInt<N>(V))
    error("relocation " + getRelName(Type) + " out of range");
}

// Reports an error if V does not fit in an unsigned N-bit field.
template <unsigned N> static void checkUInt(uint64_t V, uint32_t Type) {
  if (!isUInt<N>(V))
    error("relocation " + getRelName(Type) + " out of range");
}

// Reports an error if V fits in neither a signed nor an unsigned
// N-bit field.
template <unsigned N> static void checkIntUInt(uint64_t V, uint32_t Type) {
  if (!isInt<N>(V) && !isUInt<N>(V))
    error("relocation " + getRelName(Type) + " out of range");
}

// Reports an error if V is not N-byte aligned. Note that the check
// assumes N is a power of two.
template <unsigned N> static void checkAlignment(uint64_t V, uint32_t Type) {
  if ((V & (N - 1)) != 0)
    error("improper alignment for relocation " + getRelName(Type));
}

// Reports an error for a relocation type that cannot be expressed as a
// dynamic relocation in position-independent output.
static void errorDynRel(uint32_t Type) {
  error("relocation " + getRelName(Type) +
        " cannot be used against shared object; recompile with -fPIC.");
}

namespace {
// Target description for 32-bit x86 (i386).
class X86TargetInfo final : public TargetInfo {
public:
  X86TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
  void writeGotPltHeader(uint8_t *Buf) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;

  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                          RelExpr Expr) const override;
  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

// Target description for x86-64. Templated over ELFT to support both the
// regular 64-bit ABI (ELF64LE) and the x32 ABI (ELF32LE).
template <class ELFT> class X86_64TargetInfo final : public TargetInfo {
public:
  X86_64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPltHeader(uint8_t *Buf) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;

  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                          RelExpr Expr) const override;
  void relaxGot(uint8_t *Loc, uint64_t Val) const override;
  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;

private:
  void relaxGotNoPic(uint8_t *Loc, uint64_t Val, uint8_t Op,
                     uint8_t ModRm) const;
};

// Target description for 32-bit PowerPC.
class PPCTargetInfo final : public TargetInfo {
public:
  PPCTargetInfo();
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
};

// Target description for 64-bit PowerPC (ELFv1 ABI).
class PPC64TargetInfo final : public TargetInfo {
public:
  PPC64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

// Target description for AArch64 (ARM64).
class AArch64TargetInfo final : public TargetInfo {
public:
  AArch64TargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsInitialExecRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  bool usesOnlyLowPageBits(uint32_t Type) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                          RelExpr Expr) const override;
  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

// Target description for AMDGPU.
class AMDGPUTargetInfo final : public TargetInfo {
public:
  AMDGPUTargetInfo();
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
};

// Target description for 32-bit ARM.
class ARMTargetInfo final : public TargetInfo {
public:
  ARMTargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  uint64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  RelExpr getThunkExpr(RelExpr Expr, uint32_t RelocType,
                       const InputFile &File,
                       const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
};

// Target description for MIPS, templated over all four ELF flavors.
template <class ELFT> class MipsTargetInfo final : public TargetInfo {
public:
  MipsTargetInfo();
  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S) const override;
  uint64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
  uint32_t getDynRel(uint32_t Type) const override;
  bool isTlsLocalDynamicRel(uint32_t Type) const override;
  bool isTlsGlobalDynamicRel(uint32_t Type) const override;
  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  RelExpr getThunkExpr(RelExpr Expr, uint32_t RelocType,
                       const InputFile &File,
                       const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
  bool usesOnlyLowPageBits(uint32_t Type) const override;
};
} // anonymous namespace

// Instantiates the TargetInfo subclass that matches the output machine
// (Config->EMachine) and, where relevant, its ELF kind (Config->EKind).
TargetInfo *createTarget() {
  switch (Config->EMachine) {
  case EM_386:
    return new X86TargetInfo();
  case EM_AARCH64:
    return new AArch64TargetInfo();
  case EM_AMDGPU:
    return new AMDGPUTargetInfo();
  case EM_ARM:
    return new ARMTargetInfo();
  case EM_MIPS:
    switch (Config->EKind) {
    case ELF32LEKind:
      return new MipsTargetInfo<ELF32LE>();
    case ELF32BEKind:
      return new MipsTargetInfo<ELF32BE>();
    case ELF64LEKind:
      return new MipsTargetInfo<ELF64LE>();
    case ELF64BEKind:
      return new MipsTargetInfo<ELF64BE>();
    default:
      fatal("unsupported MIPS target");
    }
  case EM_PPC:
    return new PPCTargetInfo();
  case EM_PPC64:
    return new PPC64TargetInfo();
  case EM_X86_64:
    // ELF32LEKind here means the x32 ABI (64-bit ISA, 32-bit pointers).
    if (Config->EKind == ELF32LEKind)
      return new X86_64TargetInfo<ELF32LE>();
    return new X86_64TargetInfo<ELF64LE>();
  }
  fatal("unknown target machine");
}

TargetInfo::~TargetInfo() {}

// Default: relocations carry no addend encoded in the section contents.
// Targets using REL (not RELA) relocations override this.
uint64_t TargetInfo::getImplicitAddend(const uint8_t *Buf,
                                       uint32_t Type) const {
  return 0;
}

bool TargetInfo::usesOnlyLowPageBits(uint32_t Type) const { return false; }

// Default: no range-extension thunk is needed; keep the expression.
RelExpr TargetInfo::getThunkExpr(RelExpr Expr, uint32_t RelocType,
                                 const InputFile &File,
                                 const SymbolBody &S) const {
  return Expr;
}

bool TargetInfo::isTlsInitialExecRel(uint32_t Type) const { return false; }

bool TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const { return false; }

bool TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
  return false;
}

// Default: no target-specific adjustment of a relaxed expression.
RelExpr TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                                    RelExpr Expr) const {
  return Expr;
}

void TargetInfo::relaxGot(uint8_t *Loc, uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}

void TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}

void TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}

void TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}

void TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  llvm_unreachable("Should not have claimed to be relaxable");
}

X86TargetInfo::X86TargetInfo() {
  CopyRel = R_386_COPY;
  GotRel = R_386_GLOB_DAT;
  PltRel = R_386_JUMP_SLOT;
  IRelativeRel = R_386_IRELATIVE;
  RelativeRel = R_386_RELATIVE;
  TlsGotRel = R_386_TLS_TPOFF;
  TlsModuleIndexRel = R_386_TLS_DTPMOD32;
  TlsOffsetRel = R_386_TLS_DTPOFF32;
  GotEntrySize = 4;
  GotPltEntrySize = 4;
  PltEntrySize = 16;
  PltHeaderSize = 16;
  // GD relaxation rewrites two relocations (the leal and the following
  // call) at once, so the second one must be skipped.
  TlsGdRelaxSkip = 2;
}

// Maps an i386 relocation type to the linker-internal expression kind
// that describes how its value is computed.
RelExpr X86TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
  switch (Type) {
  default:
    return R_ABS;
  case R_386_TLS_GD:
    return R_TLSGD;
  case R_386_TLS_LDM:
    return R_TLSLD;
  case R_386_PLT32:
    return R_PLT_PC;
  case R_386_PC32:
    return R_PC;
  case R_386_GOTPC:
    return R_GOTONLY_PC;
  case R_386_TLS_IE:
    return R_GOT;
  case R_386_GOT32:
  case R_386_GOT32X:
  case R_386_TLS_GOTIE:
    return R_GOT_FROM_END;
  case R_386_GOTOFF:
    return R_GOTREL;
  case R_386_TLS_LE:
    return R_TLS;
  case R_386_TLS_LE_32:
    return R_NEG_TLS;
  }
}

// i386 TLS relaxations need variants of the generic relaxed expressions
// because GOT-relative values are computed from the end of the GOT and
// LE offsets are negated on this target.
RelExpr X86TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
                                       RelExpr Expr) const {
  switch (Expr) {
  default:
    return Expr;
  case R_RELAX_TLS_GD_TO_IE:
    return R_RELAX_TLS_GD_TO_IE_END;
  case R_RELAX_TLS_GD_TO_LE:
    return R_RELAX_TLS_GD_TO_LE_NEG;
  }
}

// The first .got.plt entry holds the address of _DYNAMIC.
void X86TargetInfo::writeGotPltHeader(uint8_t *Buf) const {
  write32le(Buf, Out<ELF32LE>::Dynamic->getVA());
}

void X86TargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &S) const {
  // Entries in .got.plt initially point back to the corresponding
  // PLT entries with a fixed offset to skip the first instruction.
  write32le(Buf, S.getPltVA<ELF32LE>() + 6);
}

// Rewrites TLS LE relocation types to their dynamic TPOFF counterparts
// when they must be emitted as dynamic relocations.
uint32_t X86TargetInfo::getDynRel(uint32_t Type) const {
  if (Type == R_386_TLS_LE)
    return R_386_TLS_TPOFF;
  if (Type == R_386_TLS_LE_32)
    return R_386_TLS_TPOFF32;
  return Type;
}

bool X86TargetInfo::isTlsGlobalDynamicRel(uint32_t Type) const {
  return Type == R_386_TLS_GD;
}

bool X86TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const {
  return Type == R_386_TLS_LDO_32 || Type == R_386_TLS_LDM;
}

bool X86TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
  return Type == R_386_TLS_IE || Type == R_386_TLS_GOTIE;
}

void X86TargetInfo::writePltHeader(uint8_t *Buf) const {
  // Executable files and shared object files have
  // separate procedure linkage tables.
  if (Config->Pic) {
    // PIC form: the GOT address is materialized in %ebx by the caller,
    // so the header indexes the GOT relative to that register.
    const uint8_t V[] = {
        0xff, 0xb3, 0x04, 0x00, 0x00, 0x00, // pushl 4(%ebx)
        0xff, 0xa3, 0x08, 0x00, 0x00, 0x00, // jmp *8(%ebx)
        0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
    };
    memcpy(Buf, V, sizeof(V));
    return;
  }

  // Non-PIC form: absolute addresses into .got.plt are patched in below.
  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushl (GOT+4)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *(GOT+8)
      0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint32_t Got = Out<ELF32LE>::GotPlt->getVA();
  write32le(Buf + 2, Got + 4);
  write32le(Buf + 8, Got + 8);
}

void X86TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                             uint64_t PltEntryAddr, int32_t Index,
                             unsigned RelOff) const {
  const uint8_t Inst[] = {
      0xff, 0x00, 0x00, 0x00, 0x00, 0x00, // jmp *foo_in_GOT|*foo@GOT(%ebx)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushl $reloc_offset
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmp .PLT0@PC
  };
  memcpy(Buf, Inst, sizeof(Inst));

  // jmp *foo@GOT(%ebx) or jmp *foo_in_GOT
  Buf[1] = Config->Pic ? 0xa3 : 0x25;
  uint32_t Got = Out<ELF32LE>::GotPlt->getVA();
  write32le(Buf + 2, Config->Shared ? GotEntryAddr - Got : GotEntryAddr);
  write32le(Buf + 7, RelOff);
  // PC-relative displacement back to the PLT header at index 0.
  write32le(Buf + 12, -Index * PltEntrySize - PltHeaderSize - 16);
}

// i386 uses REL relocations, so addends are read from the section
// contents for the types that carry one.
uint64_t X86TargetInfo::getImplicitAddend(const uint8_t *Buf,
                                          uint32_t Type) const {
  switch (Type) {
  default:
    return 0;
  case R_386_32:
  case R_386_GOT32:
  case R_386_GOT32X:
  case R_386_GOTOFF:
  case R_386_GOTPC:
  case R_386_PC32:
  case R_386_PLT32:
    return read32le(Buf);
  }
}

// All supported i386 relocations are 32-bit wide.
void X86TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  checkInt<32>(Val, Type);
  write32le(Loc, Val);
}

void X86TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Convert
  //   leal x@tlsgd(, %ebx, 1),
  //   call __tls_get_addr@plt
  // to
  //   movl %gs:0,%eax
  //   subl $x@ntpoff,%eax
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x81, 0xe8, 0x00, 0x00, 0x00, 0x00  // subl 0(%ebx), %eax
  };
  memcpy(Loc - 3, Inst, sizeof(Inst));
  relocateOne(Loc + 5, R_386_32, Val);
}

void X86TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Convert
  //   leal x@tlsgd(, %ebx, 1),
  //   call __tls_get_addr@plt
  // to
  //   movl %gs:0, %eax
  //   addl x@gotntpoff(%ebx), %eax
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x03, 0x83, 0x00, 0x00, 0x00, 0x00  // addl 0(%ebx), %eax
  };
  memcpy(Loc - 3, Inst, sizeof(Inst));
  relocateOne(Loc + 5, R_386_32, Val);
}

// In some conditions, relocations can be optimized to avoid using GOT.
// This function does that for Initial Exec to Local Exec case.
void X86TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // Ulrich's document section 6.2 says that @gotntpoff can
  // be used with MOVL or ADDL instructions.
  // @indntpoff is similar to @gotntpoff, but for use in
  // position dependent code.
  uint8_t Reg = (Loc[-1] >> 3) & 7;

  if (Type == R_386_TLS_IE) {
    if (Loc[-1] == 0xa1) {
      // "movl foo@indntpoff,%eax" -> "movl $foo,%eax"
      // This case is different from the generic case below because
      // this is a 5 byte instruction while below is 6 bytes.
      Loc[-1] = 0xb8;
    } else if (Loc[-2] == 0x8b) {
      // "movl foo@indntpoff,%reg" -> "movl $foo,%reg"
      Loc[-2] = 0xc7;
      Loc[-1] = 0xc0 | Reg;
    } else {
      // "addl foo@indntpoff,%reg" -> "addl $foo,%reg"
      Loc[-2] = 0x81;
      Loc[-1] = 0xc0 | Reg;
    }
  } else {
    assert(Type == R_386_TLS_GOTIE);
    if (Loc[-2] == 0x8b) {
      // "movl foo@gottpoff(%rip),%reg" -> "movl $foo,%reg"
      Loc[-2] = 0xc7;
      Loc[-1] = 0xc0 | Reg;
    } else {
      // "addl foo@gotntpoff(%rip),%reg" -> "leal foo(%reg),%reg"
      Loc[-2] = 0x8d;
      Loc[-1] = 0x80 | (Reg << 3) | Reg;
    }
  }
  relocateOne(Loc, R_386_TLS_LE, Val);
}

void X86TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                   uint64_t Val) const {
  // An LDO offset can be written directly as an LE offset; only the
  // LDM call sequence itself needs rewriting.
  if (Type == R_386_TLS_LDO_32) {
    relocateOne(Loc, R_386_TLS_LE, Val);
    return;
  }

  // Convert
  //   leal foo(%reg),%eax
  //   call ___tls_get_addr
  // to
  //   movl %gs:0,%eax
  //   nop
  //   leal 0(%esi,1),%esi
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0,%eax
      0x90,                               // nop
      0x8d, 0x74, 0x26, 0x00              // leal 0(%esi,1),%esi
  };
  memcpy(Loc - 2, Inst, sizeof(Inst));
}

template <class ELFT> X86_64TargetInfo<ELFT>::X86_64TargetInfo() {
  CopyRel = R_X86_64_COPY;
  GotRel = R_X86_64_GLOB_DAT;
  PltRel = R_X86_64_JUMP_SLOT;
  RelativeRel = R_X86_64_RELATIVE;
  IRelativeRel = R_X86_64_IRELATIVE;
  TlsGotRel = R_X86_64_TPOFF64;
  TlsModuleIndexRel = R_X86_64_DTPMOD64;
  TlsOffsetRel = R_X86_64_DTPOFF64;
  GotEntrySize = 8;
  GotPltEntrySize = 8;
  PltEntrySize = 16;
  PltHeaderSize = 16;
  // GD relaxation rewrites both the leaq and the following call, so the
  // second relocation of the pair is skipped.
  TlsGdRelaxSkip = 2;
}

// Maps an x86-64 relocation type to the linker-internal expression kind
// that describes how its value is computed.
template <class ELFT>
RelExpr X86_64TargetInfo<ELFT>::getRelExpr(uint32_t Type,
                                           const SymbolBody &S) const {
  switch (Type) {
  default:
    return R_ABS;
  case R_X86_64_TPOFF32:
    return R_TLS;
  case R_X86_64_TLSLD:
    return R_TLSLD_PC;
  case R_X86_64_TLSGD:
    return R_TLSGD_PC;
  case R_X86_64_SIZE32:
  case R_X86_64_SIZE64:
    return R_SIZE;
  case R_X86_64_PLT32:
    return R_PLT_PC;
  case R_X86_64_PC32:
  case R_X86_64_PC64:
    return R_PC;
  case R_X86_64_GOT32:
    return R_GOT_FROM_END;
  case R_X86_64_GOTPCREL:
  case R_X86_64_GOTPCRELX:
  case R_X86_64_REX_GOTPCRELX:
  case R_X86_64_GOTTPOFF:
    return R_GOT_PC;
  }
}

template <class ELFT>
void X86_64TargetInfo<ELFT>::writeGotPltHeader(uint8_t *Buf) const {
  // The first entry holds the value of _DYNAMIC. It is not clear why that is
  // required, but it is documented in the psabi and the glibc dynamic linker
  // seems to use it (note that this is relevant for linking ld.so, not any
  // other program).
  write64le(Buf, Out<ELFT>::Dynamic->getVA());
}

template <class ELFT>
void X86_64TargetInfo<ELFT>::writeGotPlt(uint8_t *Buf,
                                         const SymbolBody &S) const {
  // See comments in X86TargetInfo::writeGotPlt.
  write32le(Buf, S.getPltVA<ELFT>() + 6);
}

template <class ELFT>
void X86_64TargetInfo<ELFT>::writePltHeader(uint8_t *Buf) const {
  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushq GOT+8(%rip)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *GOT+16(%rip)
      0x0f, 0x1f, 0x40, 0x00              // nopl 0x0(rax)
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint64_t Got = Out<ELFT>::GotPlt->getVA();
  uint64_t Plt = Out<ELFT>::Plt->getVA();
  // RIP-relative displacements: target minus the end of each instruction.
  write32le(Buf + 2, Got - Plt + 2); // GOT+8
  write32le(Buf + 8, Got - Plt + 4); // GOT+16
}

template <class ELFT>
void X86_64TargetInfo<ELFT>::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                      uint64_t PltEntryAddr, int32_t Index,
                                      unsigned RelOff) const {
  const uint8_t Inst[] = {
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmpq *got(%rip)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushq <relocation index>
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmpq plt[0]
  };
  memcpy(Buf, Inst, sizeof(Inst));

  write32le(Buf + 2, GotEntryAddr - PltEntryAddr - 6);
  write32le(Buf + 7, Index);
  // PC-relative displacement back to the PLT header at index 0.
  write32le(Buf + 12, -Index * PltEntrySize - PltHeaderSize - 16);
}

// PC-relative and plain 32-bit absolute relocations cannot be represented
// dynamically on x86-64; report an error instead of emitting them.
template <class ELFT>
uint32_t X86_64TargetInfo<ELFT>::getDynRel(uint32_t Type) const {
  if (Type == R_X86_64_PC32 || Type == R_X86_64_32)
    errorDynRel(Type);
  return Type;
}

template <class ELFT>
bool X86_64TargetInfo<ELFT>::isTlsInitialExecRel(uint32_t Type) const {
  return Type == R_X86_64_GOTTPOFF;
}

template <class ELFT>
bool X86_64TargetInfo<ELFT>::isTlsGlobalDynamicRel(uint32_t Type) const {
  return Type == R_X86_64_TLSGD;
}

template <class ELFT>
bool X86_64TargetInfo<ELFT>::isTlsLocalDynamicRel(uint32_t Type) const {
  return Type == R_X86_64_DTPOFF32 || Type == R_X86_64_DTPOFF64 ||
         Type == R_X86_64_TLSLD;
}

template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
                                            uint64_t Val) const {
  // Convert
  //   .byte 0x66
  //   leaq x@tlsgd(%rip), %rdi
  //   .word 0x6666
  //   rex64
  //   call __tls_get_addr@plt
  // to
  //   mov %fs:0x0,%rax
  //   lea x@tpoff,%rax
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
      0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00              // lea x@tpoff,%rax
  };
  memcpy(Loc - 4, Inst, sizeof(Inst));
  // The original code used a pc relative relocation and so we have to
  // compensate for the -4 it had in the addend.
  relocateOne(Loc + 8, R_X86_64_TPOFF32, Val + 4);
}

template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
                                            uint64_t Val) const {
  // Convert
  //   .byte 0x66
  //   leaq x@tlsgd(%rip), %rdi
  //   .word 0x6666
  //   rex64
  //   call __tls_get_addr@plt
  // to
  //   mov %fs:0x0,%rax
  //   addq x@tpoff,%rax
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
      0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00              // addq x@tpoff,%rax
  };
  memcpy(Loc - 4, Inst, sizeof(Inst));
  // Both code sequences are PC relative, but since we are moving the constant
  // forward by 8 bytes we have to subtract the value by 8.
  relocateOne(Loc + 8, R_X86_64_PC32, Val - 8);
}

// In some conditions, R_X86_64_GOTTPOFF relocation can be optimized to
// R_X86_64_TPOFF32 so that it does not use GOT.
template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                            uint64_t Val) const {
  uint8_t *Inst = Loc - 3;
  uint8_t Reg = Loc[-1] >> 3;
  uint8_t *RegSlot = Loc - 1;

  // Note that ADD with RSP or R12 is converted to ADD instead of LEA
  // because LEA with these registers needs 4 bytes to encode and thus
  // wouldn't fit the space.

  if (memcmp(Inst, "\x48\x03\x25", 3) == 0) {
    // "addq foo@gottpoff(%rip),%rsp" -> "addq $foo,%rsp"
    memcpy(Inst, "\x48\x81\xc4", 3);
  } else if (memcmp(Inst, "\x4c\x03\x25", 3) == 0) {
    // "addq foo@gottpoff(%rip),%r12" -> "addq $foo,%r12"
    memcpy(Inst, "\x49\x81\xc4", 3);
  } else if (memcmp(Inst, "\x4c\x03", 2) == 0) {
    // "addq foo@gottpoff(%rip),%r[8-15]" -> "leaq foo(%r[8-15]),%r[8-15]"
    memcpy(Inst, "\x4d\x8d", 2);
    *RegSlot = 0x80 | (Reg << 3) | Reg;
  } else if (memcmp(Inst, "\x48\x03", 2) == 0) {
    // "addq foo@gottpoff(%rip),%reg -> "leaq foo(%reg),%reg"
    memcpy(Inst, "\x48\x8d", 2);
    *RegSlot = 0x80 | (Reg << 3) | Reg;
  } else if (memcmp(Inst, "\x4c\x8b", 2) == 0) {
    // "movq foo@gottpoff(%rip),%r[8-15]" -> "movq $foo,%r[8-15]"
    memcpy(Inst, "\x49\xc7", 2);
    *RegSlot = 0xc0 | Reg;
  } else if (memcmp(Inst, "\x48\x8b", 2) == 0) {
    // "movq foo@gottpoff(%rip),%reg" -> "movq $foo,%reg"
    memcpy(Inst, "\x48\xc7", 2);
    *RegSlot = 0xc0 | Reg;
  } else {
    fatal("R_X86_64_GOTTPOFF must be used in MOVQ or ADDQ instructions only");
  }

  // The original code used a PC relative relocation.
  // Need to compensate for the -4 it had in the addend.
  relocateOne(Loc, R_X86_64_TPOFF32, Val + 4);
}

template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                            uint64_t Val) const {
  // Convert
  //   leaq bar@tlsld(%rip), %rdi
  //   callq __tls_get_addr@PLT
  //   leaq bar@dtpoff(%rax), %rcx
  // to
  //   .word 0x6666
  //   .byte 0x66
  //   mov %fs:0,%rax
  //   leaq bar@tpoff(%rax), %rcx
  if (Type == R_X86_64_DTPOFF64) {
    write64le(Loc, Val);
    return;
  }
  if (Type == R_X86_64_DTPOFF32) {
    relocateOne(Loc, R_X86_64_TPOFF32, Val);
    return;
  }

  const uint8_t Inst[] = {
      0x66, 0x66,                                          // .word 0x6666
      0x66,                                                // .byte 0x66
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
  };
  memcpy(Loc - 3, Inst, sizeof(Inst));
}

// Applies one relocation, range-checking the value against the width and
// signedness the relocation type requires.
template <class ELFT>
void X86_64TargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint32_t Type,
                                         uint64_t Val) const {
  switch (Type) {
  case R_X86_64_32:
    checkUInt<32>(Val, Type);
    write32le(Loc, Val);
    break;
  case R_X86_64_32S:
  case R_X86_64_TPOFF32:
  case R_X86_64_GOT32:
  case R_X86_64_GOTPCREL:
  case R_X86_64_GOTPCRELX:
  case R_X86_64_REX_GOTPCRELX:
  case R_X86_64_PC32:
  case R_X86_64_GOTTPOFF:
  case R_X86_64_PLT32:
  case R_X86_64_TLSGD:
  case R_X86_64_TLSLD:
  case R_X86_64_DTPOFF32:
  case R_X86_64_SIZE32:
    checkInt<32>(Val, Type);
    write32le(Loc, Val);
    break;
  case R_X86_64_64:
  case R_X86_64_DTPOFF64:
  case R_X86_64_SIZE64:
  case R_X86_64_PC64:
    write64le(Loc, Val);
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}

// Decides whether a GOTPCRELX relocation can be relaxed, by inspecting the
// opcode (Data[-2]) and ModR/M (Data[-1]) bytes that precede the operand.
template <class ELFT>
RelExpr X86_64TargetInfo<ELFT>::adjustRelaxExpr(uint32_t Type,
                                                const uint8_t *Data,
                                                RelExpr RelExpr) const {
  if (Type != R_X86_64_GOTPCRELX && Type != R_X86_64_REX_GOTPCRELX)
    return RelExpr;
  const uint8_t Op = Data[-2];
  const uint8_t ModRm = Data[-1];
  // FIXME: When PIC is disabled and foo is defined locally in the
  // lower 32 bit address space, memory operand in mov can be converted into
  // immediate operand. Otherwise, mov must be changed to lea. We support only
  // latter relaxation at this moment.
  if (Op == 0x8b)
    return R_RELAX_GOT_PC;
  // Relax call and jmp.
  if (Op == 0xff && (ModRm == 0x15 || ModRm == 0x25))
    return R_RELAX_GOT_PC;

  // Relaxation of test, adc, add, and, cmp, or, sbb, sub, xor.
  // If PIC then no relaxation is available.
  // We also don't relax test/binop instructions without REX byte,
  // they are 32bit operations and not common to have.
  assert(Type == R_X86_64_REX_GOTPCRELX);
  return Config->Pic ? RelExpr : R_RELAX_GOT_PC_NOPIC;
}

// A subset of relaxations can only be applied for no-PIC. This method
// handles such relaxations. Instructions encoding information was taken from:
// "Intel 64 and IA-32 Architectures Software Developer's Manual V2"
// (http://www.intel.com/content/dam/www/public/us/en/documents/manuals/
// 64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf)
template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxGotNoPic(uint8_t *Loc, uint64_t Val,
                                           uint8_t Op, uint8_t ModRm) const {
  const uint8_t Rex = Loc[-3];
  // Convert "test %reg, foo@GOTPCREL(%rip)" to "test $foo, %reg".
  if (Op == 0x85) {
    // See "TEST-Logical Compare" (4-428 Vol. 2B),
    // TEST r/m64, r64 uses "full" ModR / M byte (no opcode extension).

    // ModR/M byte has form XX YYY ZZZ, where
    // YYY is MODRM.reg(register 2), ZZZ is MODRM.rm(register 1).
    // XX has different meanings:
    // 00: The operand's memory address is in reg1.
    // 01: The operand's memory address is reg1 + a byte-sized displacement.
    // 10: The operand's memory address is reg1 + a word-sized displacement.
    // 11: The operand is reg1 itself.
    // If an instruction requires only one operand, the unused reg2 field
    // holds extra opcode bits rather than a register code
    // 0xC0 == 11 000 000 binary.
    // 0x38 == 00 111 000 binary.
    // We transfer reg2 to reg1 here as operand.
    // See "2.1.3 ModR/M and SIB Bytes" (Vol. 2A 2-3).
    Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3; // ModR/M byte.

    // Change opcode from TEST r/m64, r64 to TEST r/m64, imm32
    // See "TEST-Logical Compare" (4-428 Vol. 2B).
    Loc[-2] = 0xf7;

    // Move R bit to the B bit in REX byte.
    // REX byte is encoded as 0100WRXB, where
    // 0100 is 4bit fixed pattern.
    // REX.W When 1, a 64-bit operand size is used. Otherwise, when 0, the
    //   default operand size is used (which is 32-bit for most but not all
    //   instructions).
    // REX.R This 1-bit value is an extension to the MODRM.reg field.
    // REX.X This 1-bit value is an extension to the SIB.index field.
    // REX.B This 1-bit value is an extension to the MODRM.rm field or the
    //   SIB.base field.
    // See "2.2.1.2 More on REX Prefix Fields " (2-8 Vol. 2A).
    Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
    relocateOne(Loc, R_X86_64_PC32, Val);
    return;
  }

  // If we are here then we need to relax the adc, add, and, cmp, or, sbb, sub
  // or xor operations.

  // Convert "binop foo@GOTPCREL(%rip), %reg" to "binop $foo, %reg".
  // Logic is close to one for test instruction above, but we also
  // write opcode extension here, see below for details.
  Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3 | (Op & 0x3c); // ModR/M byte.

  // Primary opcode is 0x81, opcode extension is one of:
  // 000b = ADD, 001b is OR, 010b is ADC, 011b is SBB,
  // 100b is AND, 101b is SUB, 110b is XOR, 111b is CMP.
  // This value was written to MODRM.reg in a line above.
  // See "3.2 INSTRUCTIONS (A-M)" (Vol. 2A 3-15),
  // "INSTRUCTION SET REFERENCE, N-Z" (Vol. 2B 4-1) for
  // descriptions about each operation.
  Loc[-2] = 0x81;
  Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
  relocateOne(Loc, R_X86_64_PC32, Val);
}

// Relaxes a GOT-indirect instruction to a direct (GOT-free) form.
// Op is the opcode byte at Loc[-2], ModRm the ModR/M byte at Loc[-1].
template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxGot(uint8_t *Loc, uint64_t Val) const {
  const uint8_t Op = Loc[-2];
  const uint8_t ModRm = Loc[-1];

  // Convert "mov foo@GOTPCREL(%rip),%reg" to "lea foo(%rip),%reg".
  if (Op == 0x8b) {
    Loc[-2] = 0x8d;
    relocateOne(Loc, R_X86_64_PC32, Val);
    return;
  }

  if (Op != 0xff) {
    // We are relaxing a rip relative to an absolute, so compensate
    // for the old -4 addend.
    assert(!Config->Pic);
    relaxGotNoPic(Loc, Val + 4, Op, ModRm);
    return;
  }

  // Convert call/jmp instructions.
  if (ModRm == 0x15) {
    // ABI says we can convert "call *foo@GOTPCREL(%rip)" to "nop; call foo".
    // Instead we convert to "addr32 call foo" where addr32 is an instruction
    // prefix. That makes the result a single instruction.
    Loc[-2] = 0x67; // addr32 prefix
    Loc[-1] = 0xe8; // call
    relocateOne(Loc, R_X86_64_PC32, Val);
    return;
  }

  // Convert "jmp *foo@GOTPCREL(%rip)" to "jmp foo; nop".
  // jmp doesn't return, so it is fine to use nop here, it is just a stub.
  assert(ModRm == 0x25);
  Loc[-2] = 0xe9; // jmp
  Loc[3] = 0x90;  // nop
  relocateOne(Loc - 1, R_X86_64_PC32, Val + 1);
}

// Relocation masks following the #lo(value), #hi(value), #ha(value),
// #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
// document.
// The "a" (adjusted) forms add 0x8000 first so that a following low-16
// part, which is sign-extended by the hardware, reconstructs the exact
// value.
static uint16_t applyPPCLo(uint64_t V) { return V; }
static uint16_t applyPPCHi(uint64_t V) { return V >> 16; }
static uint16_t applyPPCHa(uint64_t V) { return (V + 0x8000) >> 16; }
static uint16_t applyPPCHigher(uint64_t V) { return V >> 32; }
static uint16_t applyPPCHighera(uint64_t V) { return (V + 0x8000) >> 32; }
static uint16_t applyPPCHighest(uint64_t V) { return V >> 48; }
static uint16_t applyPPCHighesta(uint64_t V) { return (V + 0x8000) >> 48; }

PPCTargetInfo::PPCTargetInfo() {}

// Apply a 32-bit PPC relocation. Only the HA/LO 16-bit halves are
// supported here.
void PPCTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                uint64_t Val) const {
  switch (Type) {
  case R_PPC_ADDR16_HA:
    write16be(Loc, applyPPCHa(Val));
    break;
  case R_PPC_ADDR16_LO:
    write16be(Loc, applyPPCLo(Val));
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}

RelExpr PPCTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
  return R_ABS;
}

PPC64TargetInfo::PPC64TargetInfo() {
  PltRel = GotRel = R_PPC64_GLOB_DAT;
  RelativeRel = R_PPC64_RELATIVE;
  GotEntrySize = 8;
  GotPltEntrySize = 8;
  PltEntrySize = 32;
  PltHeaderSize = 0;

  // We need 64K pages (at least under glibc/Linux, the loader won't
  // set different permissions on a finer granularity than that).
  PageSize = 65536;

  // The PPC64 ELF ABI v1 spec, says:
  //
  //   It is normally desirable to put segments with different characteristics
  //   in separate 256 Mbyte portions of the address space, to give the
  //   operating system full paging flexibility in the 64-bit address space.
  //
  // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
  // use 0x10000000 as the starting address.
  DefaultImageBase = 0x10000000;
}

// Bias between the start of the TOC-holding section and the TOC base
// pointer (%r2), per the ELFv1 ABI.
static uint64_t PPC64TocOffset = 0x8000;

uint64_t getPPC64TocBase() {
  // The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The
  // TOC starts where the first of these sections starts. We always create a
  // .got when we see a relocation that uses it, so for us the start is always
  // the .got.
  uint64_t TocVA = Out<ELF64BE>::Got->getVA();

  // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
  // thus permitting a full 64 Kbytes segment. Note that the glibc startup
  // code (crt1.o) assumes that you can get from the TOC base to the
  // start of the .toc section with only a single (signed) 16-bit relocation.
  return TocVA + PPC64TocOffset;
}

RelExpr PPC64TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
  switch (Type) {
  default:
    return R_ABS;
  case R_PPC64_TOC16:
  case R_PPC64_TOC16_DS:
  case R_PPC64_TOC16_HA:
  case R_PPC64_TOC16_HI:
  case R_PPC64_TOC16_LO:
  case R_PPC64_TOC16_LO_DS:
    return R_GOTREL;
  case R_PPC64_TOC:
    return R_PPC_TOC;
  case R_PPC64_REL24:
    return R_PPC_PLT_OPD;
  }
}

// Write one 32-byte ELFv1 PLT entry: load the function descriptor address
// from the GOT (TOC-relative), then load and branch through its entry
// point, restoring %r2 and %r11 from the descriptor.
void PPC64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                               uint64_t PltEntryAddr, int32_t Index,
                               unsigned RelOff) const {
  uint64_t Off = GotEntryAddr - getPPC64TocBase();

  // FIXME: What we should do, in theory, is get the offset of the function
  // descriptor in the .opd section, and use that as the offset from %r2 (the
  // TOC-base pointer). Instead, we have the GOT-entry offset, and that will
  // be a pointer to the function descriptor in the .opd section. Using
  // this scheme is simpler, but requires an extra indirection per PLT dispatch.

  write32be(Buf, 0xf8410028);                       // std %r2, 40(%r1)
  write32be(Buf + 4, 0x3d620000 | applyPPCHa(Off)); // addis %r11, %r2, X@ha
  write32be(Buf + 8, 0xe98b0000 | applyPPCLo(Off)); // ld %r12, X@l(%r11)
  write32be(Buf + 12, 0xe96c0000);                  // ld %r11,0(%r12)
  write32be(Buf + 16, 0x7d6903a6);                  // mtctr %r11
  write32be(Buf + 20, 0xe84c0008);                  // ld %r2,8(%r12)
  write32be(Buf + 24, 0xe96c0010);                  // ld %r11,16(%r12)
  write32be(Buf + 28, 0x4e800420);                  // bctr
}

// Map a TOC-relative relocation to the equivalent ADDR16 form, folding the
// TOC bias into the value. Non-TOC types pass through unchanged.
static std::pair<uint32_t, uint64_t> toAddr16Rel(uint32_t Type, uint64_t Val) {
  uint64_t V = Val - PPC64TocOffset;
  switch (Type) {
  case R_PPC64_TOC16: return {R_PPC64_ADDR16, V};
  case R_PPC64_TOC16_DS: return {R_PPC64_ADDR16_DS, V};
  case R_PPC64_TOC16_HA: return {R_PPC64_ADDR16_HA, V};
  case R_PPC64_TOC16_HI: return {R_PPC64_ADDR16_HI, V};
  case R_PPC64_TOC16_LO: return {R_PPC64_ADDR16_LO, V};
  case R_PPC64_TOC16_LO_DS: return {R_PPC64_ADDR16_LO_DS, V};
  default: return {Type, Val};
  }
}

void PPC64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
                                  uint64_t Val) const {
  // For a TOC-relative relocation, proceed in terms of the corresponding
  // ADDR16 relocation type.
  std::tie(Type, Val) = toAddr16Rel(Type, Val);

  switch (Type) {
  case R_PPC64_ADDR14: {
    checkAlignment<4>(Val, Type);
    // Preserve the AA/LK bits in the branch instruction
    uint8_t AALK = Loc[3];
    write16be(Loc + 2, (AALK & 3) | (Val & 0xfffc));
    break;
  }
  case R_PPC64_ADDR16:
    checkInt<16>(Val, Type);
    write16be(Loc, Val);
    break;
  case R_PPC64_ADDR16_DS:
    // DS-form: the low two bits of the field are part of the opcode and
    // must be preserved.
    checkInt<16>(Val, Type);
    write16be(Loc, (read16be(Loc) & 3) | (Val & ~3));
    break;
  case R_PPC64_ADDR16_HA:
  case R_PPC64_REL16_HA:
    write16be(Loc, applyPPCHa(Val));
    break;
  case R_PPC64_ADDR16_HI:
  case R_PPC64_REL16_HI:
    write16be(Loc, applyPPCHi(Val));
    break;
  case R_PPC64_ADDR16_HIGHER:
    write16be(Loc, applyPPCHigher(Val));
    break;
  case R_PPC64_ADDR16_HIGHERA:
    write16be(Loc, applyPPCHighera(Val));
    break;
  case R_PPC64_ADDR16_HIGHEST:
    write16be(Loc, applyPPCHighest(Val));
    break;
  case R_PPC64_ADDR16_HIGHESTA:
    write16be(Loc, applyPPCHighesta(Val));
    break;
  case R_PPC64_ADDR16_LO:
    write16be(Loc, applyPPCLo(Val));
    break;
  case R_PPC64_ADDR16_LO_DS:
  case R_PPC64_REL16_LO:
    write16be(Loc, (read16be(Loc) & 3) | (applyPPCLo(Val) & ~3));
    break;
  case R_PPC64_ADDR32:
  case R_PPC64_REL32:
    checkInt<32>(Val, Type);
    write32be(Loc, Val);
    break;
  case R_PPC64_ADDR64:
  case R_PPC64_REL64:
  case R_PPC64_TOC:
    write64be(Loc, Val);
    break;
  case R_PPC64_REL24: {
    // 26-bit branch displacement; bits outside the mask (opcode, AA/LK)
    // are preserved.
    uint32_t Mask = 0x03FFFFFC;
    checkInt<24>(Val, Type);
    write32be(Loc, (read32be(Loc) & ~Mask) | (Val & Mask));
    break;
  }
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}

AArch64TargetInfo::AArch64TargetInfo() {
  CopyRel = R_AARCH64_COPY;
  RelativeRel = R_AARCH64_RELATIVE;
  IRelativeRel = R_AARCH64_IRELATIVE;
  GotRel = R_AARCH64_GLOB_DAT;
  PltRel =
R_AARCH64_JUMP_SLOT; 1143 TlsDescRel = R_AARCH64_TLSDESC; 1144 TlsGotRel = R_AARCH64_TLS_TPREL64; 1145 GotEntrySize = 8; 1146 GotPltEntrySize = 8; 1147 PltEntrySize = 16; 1148 PltHeaderSize = 32; 1149 1150 // It doesn't seem to be documented anywhere, but tls on aarch64 uses variant 1151 // 1 of the tls structures and the tcb size is 16. 1152 TcbSize = 16; 1153 } 1154 1155 RelExpr AArch64TargetInfo::getRelExpr(uint32_t Type, 1156 const SymbolBody &S) const { 1157 switch (Type) { 1158 default: 1159 return R_ABS; 1160 case R_AARCH64_TLSDESC_ADR_PAGE21: 1161 return R_TLSDESC_PAGE; 1162 case R_AARCH64_TLSDESC_LD64_LO12_NC: 1163 case R_AARCH64_TLSDESC_ADD_LO12_NC: 1164 return R_TLSDESC; 1165 case R_AARCH64_TLSDESC_CALL: 1166 return R_HINT; 1167 case R_AARCH64_TLSLE_ADD_TPREL_HI12: 1168 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: 1169 return R_TLS; 1170 case R_AARCH64_CALL26: 1171 case R_AARCH64_CONDBR19: 1172 case R_AARCH64_JUMP26: 1173 case R_AARCH64_TSTBR14: 1174 return R_PLT_PC; 1175 case R_AARCH64_PREL16: 1176 case R_AARCH64_PREL32: 1177 case R_AARCH64_PREL64: 1178 case R_AARCH64_ADR_PREL_LO21: 1179 return R_PC; 1180 case R_AARCH64_ADR_PREL_PG_HI21: 1181 return R_PAGE_PC; 1182 case R_AARCH64_LD64_GOT_LO12_NC: 1183 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 1184 return R_GOT; 1185 case R_AARCH64_ADR_GOT_PAGE: 1186 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 1187 return R_GOT_PAGE_PC; 1188 } 1189 } 1190 1191 RelExpr AArch64TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data, 1192 RelExpr Expr) const { 1193 if (Expr == R_RELAX_TLS_GD_TO_IE) { 1194 if (Type == R_AARCH64_TLSDESC_ADR_PAGE21) 1195 return R_RELAX_TLS_GD_TO_IE_PAGE_PC; 1196 return R_RELAX_TLS_GD_TO_IE_ABS; 1197 } 1198 return Expr; 1199 } 1200 1201 bool AArch64TargetInfo::usesOnlyLowPageBits(uint32_t Type) const { 1202 switch (Type) { 1203 default: 1204 return false; 1205 case R_AARCH64_ADD_ABS_LO12_NC: 1206 case R_AARCH64_LD64_GOT_LO12_NC: 1207 case R_AARCH64_LDST128_ABS_LO12_NC: 1208 case 
R_AARCH64_LDST16_ABS_LO12_NC: 1209 case R_AARCH64_LDST32_ABS_LO12_NC: 1210 case R_AARCH64_LDST64_ABS_LO12_NC: 1211 case R_AARCH64_LDST8_ABS_LO12_NC: 1212 case R_AARCH64_TLSDESC_ADD_LO12_NC: 1213 case R_AARCH64_TLSDESC_LD64_LO12_NC: 1214 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 1215 return true; 1216 } 1217 } 1218 1219 bool AArch64TargetInfo::isTlsInitialExecRel(uint32_t Type) const { 1220 return Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 || 1221 Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC; 1222 } 1223 1224 uint32_t AArch64TargetInfo::getDynRel(uint32_t Type) const { 1225 if (Type == R_AARCH64_ABS32 || Type == R_AARCH64_ABS64) 1226 return Type; 1227 // Keep it going with a dummy value so that we can find more reloc errors. 1228 errorDynRel(Type); 1229 return R_AARCH64_ABS32; 1230 } 1231 1232 void AArch64TargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &) const { 1233 write64le(Buf, Out<ELF64LE>::Plt->getVA()); 1234 } 1235 1236 static uint64_t getAArch64Page(uint64_t Expr) { 1237 return Expr & (~static_cast<uint64_t>(0xFFF)); 1238 } 1239 1240 void AArch64TargetInfo::writePltHeader(uint8_t *Buf) const { 1241 const uint8_t PltData[] = { 1242 0xf0, 0x7b, 0xbf, 0xa9, // stp x16, x30, [sp,#-16]! 
1243 0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[2])) 1244 0x11, 0x02, 0x40, 0xf9, // ldr x17, [x16, Offset(&(.plt.got[2]))] 1245 0x10, 0x02, 0x00, 0x91, // add x16, x16, Offset(&(.plt.got[2])) 1246 0x20, 0x02, 0x1f, 0xd6, // br x17 1247 0x1f, 0x20, 0x03, 0xd5, // nop 1248 0x1f, 0x20, 0x03, 0xd5, // nop 1249 0x1f, 0x20, 0x03, 0xd5 // nop 1250 }; 1251 memcpy(Buf, PltData, sizeof(PltData)); 1252 1253 uint64_t Got = Out<ELF64LE>::GotPlt->getVA(); 1254 uint64_t Plt = Out<ELF64LE>::Plt->getVA(); 1255 relocateOne(Buf + 4, R_AARCH64_ADR_PREL_PG_HI21, 1256 getAArch64Page(Got + 16) - getAArch64Page(Plt + 4)); 1257 relocateOne(Buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, Got + 16); 1258 relocateOne(Buf + 12, R_AARCH64_ADD_ABS_LO12_NC, Got + 16); 1259 } 1260 1261 void AArch64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr, 1262 uint64_t PltEntryAddr, int32_t Index, 1263 unsigned RelOff) const { 1264 const uint8_t Inst[] = { 1265 0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[n])) 1266 0x11, 0x02, 0x40, 0xf9, // ldr x17, [x16, Offset(&(.plt.got[n]))] 1267 0x10, 0x02, 0x00, 0x91, // add x16, x16, Offset(&(.plt.got[n])) 1268 0x20, 0x02, 0x1f, 0xd6 // br x17 1269 }; 1270 memcpy(Buf, Inst, sizeof(Inst)); 1271 1272 relocateOne(Buf, R_AARCH64_ADR_PREL_PG_HI21, 1273 getAArch64Page(GotEntryAddr) - getAArch64Page(PltEntryAddr)); 1274 relocateOne(Buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, GotEntryAddr); 1275 relocateOne(Buf + 8, R_AARCH64_ADD_ABS_LO12_NC, GotEntryAddr); 1276 } 1277 1278 static void updateAArch64Addr(uint8_t *L, uint64_t Imm) { 1279 uint32_t ImmLo = (Imm & 0x3) << 29; 1280 uint32_t ImmHi = (Imm & 0x1FFFFC) << 3; 1281 uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3); 1282 write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi); 1283 } 1284 1285 static inline void updateAArch64Add(uint8_t *L, uint64_t Imm) { 1286 or32le(L, (Imm & 0xFFF) << 10); 1287 } 1288 1289 void AArch64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type, 1290 uint64_t Val) const { 1291 switch 
(Type) { 1292 case R_AARCH64_ABS16: 1293 case R_AARCH64_PREL16: 1294 checkIntUInt<16>(Val, Type); 1295 write16le(Loc, Val); 1296 break; 1297 case R_AARCH64_ABS32: 1298 case R_AARCH64_PREL32: 1299 checkIntUInt<32>(Val, Type); 1300 write32le(Loc, Val); 1301 break; 1302 case R_AARCH64_ABS64: 1303 case R_AARCH64_PREL64: 1304 write64le(Loc, Val); 1305 break; 1306 case R_AARCH64_ADD_ABS_LO12_NC: 1307 // This relocation stores 12 bits and there's no instruction 1308 // to do it. Instead, we do a 32 bits store of the value 1309 // of r_addend bitwise-or'ed Loc. This assumes that the addend 1310 // bits in Loc are zero. 1311 or32le(Loc, (Val & 0xFFF) << 10); 1312 break; 1313 case R_AARCH64_ADR_GOT_PAGE: 1314 case R_AARCH64_ADR_PREL_PG_HI21: 1315 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 1316 case R_AARCH64_TLSDESC_ADR_PAGE21: 1317 checkInt<33>(Val, Type); 1318 updateAArch64Addr(Loc, Val >> 12); 1319 break; 1320 case R_AARCH64_ADR_PREL_LO21: 1321 checkInt<21>(Val, Type); 1322 updateAArch64Addr(Loc, Val); 1323 break; 1324 case R_AARCH64_CALL26: 1325 case R_AARCH64_JUMP26: 1326 checkInt<28>(Val, Type); 1327 or32le(Loc, (Val & 0x0FFFFFFC) >> 2); 1328 break; 1329 case R_AARCH64_CONDBR19: 1330 checkInt<21>(Val, Type); 1331 or32le(Loc, (Val & 0x1FFFFC) << 3); 1332 break; 1333 case R_AARCH64_LD64_GOT_LO12_NC: 1334 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 1335 case R_AARCH64_TLSDESC_LD64_LO12_NC: 1336 checkAlignment<8>(Val, Type); 1337 or32le(Loc, (Val & 0xFF8) << 7); 1338 break; 1339 case R_AARCH64_LDST128_ABS_LO12_NC: 1340 or32le(Loc, (Val & 0x0FF8) << 6); 1341 break; 1342 case R_AARCH64_LDST16_ABS_LO12_NC: 1343 or32le(Loc, (Val & 0x0FFC) << 9); 1344 break; 1345 case R_AARCH64_LDST8_ABS_LO12_NC: 1346 or32le(Loc, (Val & 0xFFF) << 10); 1347 break; 1348 case R_AARCH64_LDST32_ABS_LO12_NC: 1349 or32le(Loc, (Val & 0xFFC) << 8); 1350 break; 1351 case R_AARCH64_LDST64_ABS_LO12_NC: 1352 or32le(Loc, (Val & 0xFF8) << 7); 1353 break; 1354 case R_AARCH64_TSTBR14: 1355 checkInt<16>(Val, 
Type); 1356 or32le(Loc, (Val & 0xFFFC) << 3); 1357 break; 1358 case R_AARCH64_TLSLE_ADD_TPREL_HI12: 1359 checkInt<24>(Val, Type); 1360 updateAArch64Add(Loc, Val >> 12); 1361 break; 1362 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: 1363 case R_AARCH64_TLSDESC_ADD_LO12_NC: 1364 updateAArch64Add(Loc, Val); 1365 break; 1366 default: 1367 fatal("unrecognized reloc " + Twine(Type)); 1368 } 1369 } 1370 1371 void AArch64TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, 1372 uint64_t Val) const { 1373 // TLSDESC Global-Dynamic relocation are in the form: 1374 // adrp x0, :tlsdesc:v [R_AARCH64_TLSDESC_ADR_PAGE21] 1375 // ldr x1, [x0, #:tlsdesc_lo12:v [R_AARCH64_TLSDESC_LD64_LO12_NC] 1376 // add x0, x0, :tlsdesc_los:v [_AARCH64_TLSDESC_ADD_LO12_NC] 1377 // .tlsdesccall [R_AARCH64_TLSDESC_CALL] 1378 // blr x1 1379 // And it can optimized to: 1380 // movz x0, #0x0, lsl #16 1381 // movk x0, #0x10 1382 // nop 1383 // nop 1384 checkUInt<32>(Val, Type); 1385 1386 switch (Type) { 1387 case R_AARCH64_TLSDESC_ADD_LO12_NC: 1388 case R_AARCH64_TLSDESC_CALL: 1389 write32le(Loc, 0xd503201f); // nop 1390 return; 1391 case R_AARCH64_TLSDESC_ADR_PAGE21: 1392 write32le(Loc, 0xd2a00000 | (((Val >> 16) & 0xffff) << 5)); // movz 1393 return; 1394 case R_AARCH64_TLSDESC_LD64_LO12_NC: 1395 write32le(Loc, 0xf2800000 | ((Val & 0xffff) << 5)); // movk 1396 return; 1397 default: 1398 llvm_unreachable("unsupported relocation for TLS GD to LE relaxation"); 1399 } 1400 } 1401 1402 void AArch64TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, 1403 uint64_t Val) const { 1404 // TLSDESC Global-Dynamic relocation are in the form: 1405 // adrp x0, :tlsdesc:v [R_AARCH64_TLSDESC_ADR_PAGE21] 1406 // ldr x1, [x0, #:tlsdesc_lo12:v [R_AARCH64_TLSDESC_LD64_LO12_NC] 1407 // add x0, x0, :tlsdesc_los:v [_AARCH64_TLSDESC_ADD_LO12_NC] 1408 // .tlsdesccall [R_AARCH64_TLSDESC_CALL] 1409 // blr x1 1410 // And it can optimized to: 1411 // adrp x0, :gottprel:v 1412 // ldr x0, [x0, :gottprel_lo12:v] 1413 // nop 1414 // 
nop 1415 1416 switch (Type) { 1417 case R_AARCH64_TLSDESC_ADD_LO12_NC: 1418 case R_AARCH64_TLSDESC_CALL: 1419 write32le(Loc, 0xd503201f); // nop 1420 break; 1421 case R_AARCH64_TLSDESC_ADR_PAGE21: 1422 write32le(Loc, 0x90000000); // adrp 1423 relocateOne(Loc, R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, Val); 1424 break; 1425 case R_AARCH64_TLSDESC_LD64_LO12_NC: 1426 write32le(Loc, 0xf9400000); // ldr 1427 relocateOne(Loc, R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, Val); 1428 break; 1429 default: 1430 llvm_unreachable("unsupported relocation for TLS GD to LE relaxation"); 1431 } 1432 } 1433 1434 void AArch64TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, 1435 uint64_t Val) const { 1436 checkUInt<32>(Val, Type); 1437 1438 if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) { 1439 // Generate MOVZ. 1440 uint32_t RegNo = read32le(Loc) & 0x1f; 1441 write32le(Loc, (0xd2a00000 | RegNo) | (((Val >> 16) & 0xffff) << 5)); 1442 return; 1443 } 1444 if (Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) { 1445 // Generate MOVK. 
1446 uint32_t RegNo = read32le(Loc) & 0x1f; 1447 write32le(Loc, (0xf2800000 | RegNo) | ((Val & 0xffff) << 5)); 1448 return; 1449 } 1450 llvm_unreachable("invalid relocation for TLS IE to LE relaxation"); 1451 } 1452 1453 AMDGPUTargetInfo::AMDGPUTargetInfo() { 1454 GotRel = R_AMDGPU_ABS64; 1455 GotEntrySize = 8; 1456 } 1457 1458 void AMDGPUTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type, 1459 uint64_t Val) const { 1460 switch (Type) { 1461 case R_AMDGPU_GOTPCREL: 1462 case R_AMDGPU_REL32: 1463 write32le(Loc, Val); 1464 break; 1465 default: 1466 fatal("unrecognized reloc " + Twine(Type)); 1467 } 1468 } 1469 1470 RelExpr AMDGPUTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const { 1471 switch (Type) { 1472 case R_AMDGPU_REL32: 1473 return R_PC; 1474 case R_AMDGPU_GOTPCREL: 1475 return R_GOT_PC; 1476 default: 1477 fatal("do not know how to handle relocation " + Twine(Type)); 1478 } 1479 } 1480 1481 ARMTargetInfo::ARMTargetInfo() { 1482 CopyRel = R_ARM_COPY; 1483 RelativeRel = R_ARM_RELATIVE; 1484 IRelativeRel = R_ARM_IRELATIVE; 1485 GotRel = R_ARM_GLOB_DAT; 1486 PltRel = R_ARM_JUMP_SLOT; 1487 TlsGotRel = R_ARM_TLS_TPOFF32; 1488 TlsModuleIndexRel = R_ARM_TLS_DTPMOD32; 1489 TlsOffsetRel = R_ARM_TLS_DTPOFF32; 1490 GotEntrySize = 4; 1491 GotPltEntrySize = 4; 1492 PltEntrySize = 16; 1493 PltHeaderSize = 20; 1494 } 1495 1496 RelExpr ARMTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const { 1497 switch (Type) { 1498 default: 1499 return R_ABS; 1500 case R_ARM_THM_JUMP11: 1501 return R_PC; 1502 case R_ARM_CALL: 1503 case R_ARM_JUMP24: 1504 case R_ARM_PC24: 1505 case R_ARM_PLT32: 1506 case R_ARM_THM_JUMP19: 1507 case R_ARM_THM_JUMP24: 1508 case R_ARM_THM_CALL: 1509 return R_PLT_PC; 1510 case R_ARM_GOTOFF32: 1511 // (S + A) - GOT_ORG 1512 return R_GOTREL; 1513 case R_ARM_GOT_BREL: 1514 // GOT(S) + A - GOT_ORG 1515 return R_GOT_OFF; 1516 case R_ARM_GOT_PREL: 1517 // GOT(S) + - GOT_ORG 1518 return R_GOT_PC; 1519 case R_ARM_BASE_PREL: 1520 // B(S) + A - 
P 1521 // FIXME: currently B(S) assumed to be .got, this may not hold for all 1522 // platforms. 1523 return R_GOTONLY_PC; 1524 case R_ARM_MOVW_PREL_NC: 1525 case R_ARM_MOVT_PREL: 1526 case R_ARM_PREL31: 1527 case R_ARM_REL32: 1528 case R_ARM_THM_MOVW_PREL_NC: 1529 case R_ARM_THM_MOVT_PREL: 1530 return R_PC; 1531 } 1532 } 1533 1534 uint32_t ARMTargetInfo::getDynRel(uint32_t Type) const { 1535 if (Type == R_ARM_ABS32) 1536 return Type; 1537 // Keep it going with a dummy value so that we can find more reloc errors. 1538 errorDynRel(Type); 1539 return R_ARM_ABS32; 1540 } 1541 1542 void ARMTargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &) const { 1543 write32le(Buf, Out<ELF32LE>::Plt->getVA()); 1544 } 1545 1546 void ARMTargetInfo::writePltHeader(uint8_t *Buf) const { 1547 const uint8_t PltData[] = { 1548 0x04, 0xe0, 0x2d, 0xe5, // str lr, [sp,#-4]! 1549 0x04, 0xe0, 0x9f, 0xe5, // ldr lr, L2 1550 0x0e, 0xe0, 0x8f, 0xe0, // L1: add lr, pc, lr 1551 0x08, 0xf0, 0xbe, 0xe5, // ldr pc, [lr, #8] 1552 0x00, 0x00, 0x00, 0x00, // L2: .word &(.got.plt) - L1 - 8 1553 }; 1554 memcpy(Buf, PltData, sizeof(PltData)); 1555 uint64_t GotPlt = Out<ELF32LE>::GotPlt->getVA(); 1556 uint64_t L1 = Out<ELF32LE>::Plt->getVA() + 8; 1557 write32le(Buf + 16, GotPlt - L1 - 8); 1558 } 1559 1560 void ARMTargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr, 1561 uint64_t PltEntryAddr, int32_t Index, 1562 unsigned RelOff) const { 1563 // FIXME: Using simple code sequence with simple relocations. 1564 // There is a more optimal sequence but it requires support for the group 1565 // relocations. 
See ELF for the ARM Architecture Appendix A.3 1566 const uint8_t PltData[] = { 1567 0x04, 0xc0, 0x9f, 0xe5, // ldr ip, L2 1568 0x0f, 0xc0, 0x8c, 0xe0, // L1: add ip, ip, pc 1569 0x00, 0xf0, 0x9c, 0xe5, // ldr pc, [ip] 1570 0x00, 0x00, 0x00, 0x00, // L2: .word Offset(&(.plt.got) - L1 - 8 1571 }; 1572 memcpy(Buf, PltData, sizeof(PltData)); 1573 uint64_t L1 = PltEntryAddr + 4; 1574 write32le(Buf + 12, GotEntryAddr - L1 - 8); 1575 } 1576 1577 RelExpr ARMTargetInfo::getThunkExpr(RelExpr Expr, uint32_t RelocType, 1578 const InputFile &File, 1579 const SymbolBody &S) const { 1580 // A state change from ARM to Thumb and vice versa must go through an 1581 // interworking thunk if the relocation type is not R_ARM_CALL or 1582 // R_ARM_THM_CALL. 1583 switch (RelocType) { 1584 case R_ARM_PC24: 1585 case R_ARM_PLT32: 1586 case R_ARM_JUMP24: 1587 // Source is ARM, all PLT entries are ARM so no interworking required. 1588 // Otherwise we need to interwork if Symbol has bit 0 set (Thumb). 1589 if (Expr == R_PC && ((S.getVA<ELF32LE>() & 1) == 1)) 1590 return R_THUNK_PC; 1591 break; 1592 case R_ARM_THM_JUMP19: 1593 case R_ARM_THM_JUMP24: 1594 // Source is Thumb, all PLT entries are ARM so interworking is required. 1595 // Otherwise we need to interwork if Symbol has bit 0 clear (ARM). 
1596 if (Expr == R_PLT_PC) 1597 return R_THUNK_PLT_PC; 1598 if ((S.getVA<ELF32LE>() & 1) == 0) 1599 return R_THUNK_PC; 1600 break; 1601 } 1602 return Expr; 1603 } 1604 1605 void ARMTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type, 1606 uint64_t Val) const { 1607 switch (Type) { 1608 case R_ARM_NONE: 1609 break; 1610 case R_ARM_ABS32: 1611 case R_ARM_BASE_PREL: 1612 case R_ARM_GOTOFF32: 1613 case R_ARM_GOT_BREL: 1614 case R_ARM_GOT_PREL: 1615 case R_ARM_REL32: 1616 write32le(Loc, Val); 1617 break; 1618 case R_ARM_PREL31: 1619 checkInt<31>(Val, Type); 1620 write32le(Loc, (read32le(Loc) & 0x80000000) | (Val & ~0x80000000)); 1621 break; 1622 case R_ARM_CALL: 1623 // R_ARM_CALL is used for BL and BLX instructions, depending on the 1624 // value of bit 0 of Val, we must select a BL or BLX instruction 1625 if (Val & 1) { 1626 // If bit 0 of Val is 1 the target is Thumb, we must select a BLX. 1627 // The BLX encoding is 0xfa:H:imm24 where Val = imm24:H:'1' 1628 checkInt<26>(Val, Type); 1629 write32le(Loc, 0xfa000000 | // opcode 1630 ((Val & 2) << 23) | // H 1631 ((Val >> 2) & 0x00ffffff)); // imm24 1632 break; 1633 } 1634 if ((read32le(Loc) & 0xfe000000) == 0xfa000000) 1635 // BLX (always unconditional) instruction to an ARM Target, select an 1636 // unconditional BL. 
1637 write32le(Loc, 0xeb000000 | (read32le(Loc) & 0x00ffffff)); 1638 // fall through as BL encoding is shared with B 1639 case R_ARM_JUMP24: 1640 case R_ARM_PC24: 1641 case R_ARM_PLT32: 1642 checkInt<26>(Val, Type); 1643 write32le(Loc, (read32le(Loc) & ~0x00ffffff) | ((Val >> 2) & 0x00ffffff)); 1644 break; 1645 case R_ARM_THM_JUMP11: 1646 checkInt<12>(Val, Type); 1647 write16le(Loc, (read32le(Loc) & 0xf800) | ((Val >> 1) & 0x07ff)); 1648 break; 1649 case R_ARM_THM_JUMP19: 1650 // Encoding T3: Val = S:J2:J1:imm6:imm11:0 1651 checkInt<21>(Val, Type); 1652 write16le(Loc, 1653 (read16le(Loc) & 0xfbc0) | // opcode cond 1654 ((Val >> 10) & 0x0400) | // S 1655 ((Val >> 12) & 0x003f)); // imm6 1656 write16le(Loc + 2, 1657 0x8000 | // opcode 1658 ((Val >> 8) & 0x0800) | // J2 1659 ((Val >> 5) & 0x2000) | // J1 1660 ((Val >> 1) & 0x07ff)); // imm11 1661 break; 1662 case R_ARM_THM_CALL: 1663 // R_ARM_THM_CALL is used for BL and BLX instructions, depending on the 1664 // value of bit 0 of Val, we must select a BL or BLX instruction 1665 if ((Val & 1) == 0) { 1666 // Ensure BLX destination is 4-byte aligned. As BLX instruction may 1667 // only be two byte aligned. 
This must be done before overflow check 1668 Val = alignTo(Val, 4); 1669 } 1670 // Bit 12 is 0 for BLX, 1 for BL 1671 write16le(Loc + 2, (read16le(Loc + 2) & ~0x1000) | (Val & 1) << 12); 1672 // Fall through as rest of encoding is the same as B.W 1673 case R_ARM_THM_JUMP24: 1674 // Encoding B T4, BL T1, BLX T2: Val = S:I1:I2:imm10:imm11:0 1675 // FIXME: Use of I1 and I2 require v6T2ops 1676 checkInt<25>(Val, Type); 1677 write16le(Loc, 1678 0xf000 | // opcode 1679 ((Val >> 14) & 0x0400) | // S 1680 ((Val >> 12) & 0x03ff)); // imm10 1681 write16le(Loc + 2, 1682 (read16le(Loc + 2) & 0xd000) | // opcode 1683 (((~(Val >> 10)) ^ (Val >> 11)) & 0x2000) | // J1 1684 (((~(Val >> 11)) ^ (Val >> 13)) & 0x0800) | // J2 1685 ((Val >> 1) & 0x07ff)); // imm11 1686 break; 1687 case R_ARM_MOVW_ABS_NC: 1688 case R_ARM_MOVW_PREL_NC: 1689 write32le(Loc, (read32le(Loc) & ~0x000f0fff) | ((Val & 0xf000) << 4) | 1690 (Val & 0x0fff)); 1691 break; 1692 case R_ARM_MOVT_ABS: 1693 case R_ARM_MOVT_PREL: 1694 checkInt<32>(Val, Type); 1695 write32le(Loc, (read32le(Loc) & ~0x000f0fff) | 1696 (((Val >> 16) & 0xf000) << 4) | ((Val >> 16) & 0xfff)); 1697 break; 1698 case R_ARM_THM_MOVT_ABS: 1699 case R_ARM_THM_MOVT_PREL: 1700 // Encoding T1: A = imm4:i:imm3:imm8 1701 checkInt<32>(Val, Type); 1702 write16le(Loc, 1703 0xf2c0 | // opcode 1704 ((Val >> 17) & 0x0400) | // i 1705 ((Val >> 28) & 0x000f)); // imm4 1706 write16le(Loc + 2, 1707 (read16le(Loc + 2) & 0x8f00) | // opcode 1708 ((Val >> 12) & 0x7000) | // imm3 1709 ((Val >> 16) & 0x00ff)); // imm8 1710 break; 1711 case R_ARM_THM_MOVW_ABS_NC: 1712 case R_ARM_THM_MOVW_PREL_NC: 1713 // Encoding T3: A = imm4:i:imm3:imm8 1714 write16le(Loc, 1715 0xf240 | // opcode 1716 ((Val >> 1) & 0x0400) | // i 1717 ((Val >> 12) & 0x000f)); // imm4 1718 write16le(Loc + 2, 1719 (read16le(Loc + 2) & 0x8f00) | // opcode 1720 ((Val << 4) & 0x7000) | // imm3 1721 (Val & 0x00ff)); // imm8 1722 break; 1723 default: 1724 fatal("unrecognized reloc " + Twine(Type)); 1725 } 
}

// Read the addend that is encoded in-place in the instruction bytes for
// REL-format ARM relocations.
uint64_t ARMTargetInfo::getImplicitAddend(const uint8_t *Buf,
                                          uint32_t Type) const {
  switch (Type) {
  default:
    return 0;
  case R_ARM_ABS32:
  case R_ARM_BASE_PREL:
  case R_ARM_GOTOFF32:
  case R_ARM_GOT_BREL:
  case R_ARM_GOT_PREL:
  case R_ARM_REL32:
    return SignExtend64<32>(read32le(Buf));
  case R_ARM_PREL31:
    return SignExtend64<31>(read32le(Buf));
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
    return SignExtend64<26>(read32le(Buf) << 2);
  case R_ARM_THM_JUMP11:
    return SignExtend64<12>(read16le(Buf) << 1);
  case R_ARM_THM_JUMP19: {
    // Encoding T3: A = S:J2:J1:imm6:imm11:0
    // (matches the field layout used in relocateOne above)
    // NOTE(review): the assembled field is 21 bits with S at bit 20, but
    // SignExtend64<20> keeps only the low 20 bits — confirm S handling.
    uint16_t Hi = read16le(Buf);
    uint16_t Lo = read16le(Buf + 2);
    return SignExtend64<20>(((Hi & 0x0400) << 10) | // S
                            ((Lo & 0x0800) << 8) |  // J2
                            ((Lo & 0x2000) << 5) |  // J1
                            ((Hi & 0x003f) << 12) | // imm6
                            ((Lo & 0x07ff) << 1));  // imm11:0
  }
  case R_ARM_THM_CALL:
  case R_ARM_THM_JUMP24: {
    // Encoding B T4, BL T1, BLX T2: A = S:I1:I2:imm10:imm11:0
    // I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S)
    // FIXME: I1 and I2 require v6T2ops
    uint16_t Hi = read16le(Buf);
    uint16_t Lo = read16le(Buf + 2);
    return SignExtend64<24>(((Hi & 0x0400) << 14) |                    // S
                            (~((Lo ^ (Hi << 3)) << 10) & 0x00800000) | // I1
                            (~((Lo ^ (Hi << 1)) << 11) & 0x00400000) | // I2
                            ((Hi & 0x003ff) << 12) |                   // imm10
                            ((Lo & 0x007ff) << 1));                    // imm11:0
  }
  // ELF for the ARM Architecture 4.6.1.1 the implicit addend for MOVW and
  // MOVT is in the range -32768 <= A < 32768
  case R_ARM_MOVW_ABS_NC:
  case R_ARM_MOVT_ABS:
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVT_PREL: {
    uint64_t Val = read32le(Buf) & 0x000f0fff;
    return SignExtend64<16>(((Val & 0x000f0000) >> 4) | (Val & 0x00fff));
  }
  case R_ARM_THM_MOVW_ABS_NC:
  case R_ARM_THM_MOVT_ABS:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVT_PREL: {
    // Encoding T3: A = imm4:i:imm3:imm8
    uint16_t Hi = read16le(Buf);
    uint16_t Lo = read16le(Buf + 2);
    return SignExtend64<16>(((Hi & 0x000f) << 12) | // imm4
                            ((Hi & 0x0400) << 1) |  // i
                            ((Lo & 0x7000) >> 4) |  // imm3
                            (Lo & 0x00ff));         // imm8
  }
  }
}

template <class ELFT> MipsTargetInfo<ELFT>::MipsTargetInfo() {
  GotPltHeaderEntriesNum = 2;
  PageSize = 65536;
  GotEntrySize = sizeof(typename ELFT::uint);
  GotPltEntrySize = sizeof(typename ELFT::uint);
  PltEntrySize = 16;
  PltHeaderSize = 32;
  CopyRel = R_MIPS_COPY;
  PltRel = R_MIPS_JUMP_SLOT;
  if (ELFT::Is64Bits) {
    // 64-bit MIPS packs up to three relocation types into one entry;
    // see calculateMips64RelChain.
    RelativeRel = (R_MIPS_64 << 8) | R_MIPS_REL32;
    TlsGotRel = R_MIPS_TLS_TPREL64;
    TlsModuleIndexRel = R_MIPS_TLS_DTPMOD64;
    TlsOffsetRel = R_MIPS_TLS_DTPREL64;
  } else {
    RelativeRel = R_MIPS_REL32;
    TlsGotRel = R_MIPS_TLS_TPREL32;
    TlsModuleIndexRel = R_MIPS_TLS_DTPMOD32;
    TlsOffsetRel = R_MIPS_TLS_DTPREL32;
  }
}

template <class ELFT>
RelExpr MipsTargetInfo<ELFT>::getRelExpr(uint32_t Type,
                                         const SymbolBody &S) const {
  if (ELFT::Is64Bits)
    // See comment in the calculateMips64RelChain.
    Type &= 0xff;
  switch (Type) {
  default:
    return R_ABS;
  case R_MIPS_JALR:
    return R_HINT;
  case R_MIPS_GPREL16:
  case R_MIPS_GPREL32:
    return R_GOTREL;
  case R_MIPS_26:
    return R_PLT;
  case R_MIPS_HI16:
  case R_MIPS_LO16:
  case R_MIPS_GOT_OFST:
    // MIPS _gp_disp designates offset between start of function and 'gp'
    // pointer into GOT. __gnu_local_gp is equal to the current value of
    // the 'gp'. Therefore any relocations against them do not require
    // dynamic relocation.
    if (&S == ElfSym<ELFT>::MipsGpDisp)
      return R_PC;
    return R_ABS;
  case R_MIPS_PC32:
  case R_MIPS_PC16:
  case R_MIPS_PC19_S2:
  case R_MIPS_PC21_S2:
  case R_MIPS_PC26_S2:
  case R_MIPS_PCHI16:
  case R_MIPS_PCLO16:
    return R_PC;
  case R_MIPS_GOT16:
    if (S.isLocal())
      return R_MIPS_GOT_LOCAL_PAGE;
    // fallthrough
  case R_MIPS_CALL16:
  case R_MIPS_GOT_DISP:
  case R_MIPS_TLS_GOTTPREL:
    return R_MIPS_GOT_OFF;
  case R_MIPS_GOT_PAGE:
    return R_MIPS_GOT_LOCAL_PAGE;
  case R_MIPS_TLS_GD:
    return R_MIPS_TLSGD;
  case R_MIPS_TLS_LDM:
    return R_MIPS_TLSLD;
  }
}

template <class ELFT>
uint32_t MipsTargetInfo<ELFT>::getDynRel(uint32_t Type) const {
  if (Type == R_MIPS_32 || Type == R_MIPS_64)
    return RelativeRel;
  // Keep it going with a dummy value so that we can find more reloc errors.
  errorDynRel(Type);
  return R_MIPS_32;
}

template <class ELFT>
bool MipsTargetInfo<ELFT>::isTlsLocalDynamicRel(uint32_t Type) const {
  return Type == R_MIPS_TLS_LDM;
}

template <class ELFT>
bool MipsTargetInfo<ELFT>::isTlsGlobalDynamicRel(uint32_t Type) const {
  return Type == R_MIPS_TLS_GD;
}

template <class ELFT>
void MipsTargetInfo<ELFT>::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
  // Uninitialized .got.plt entries point back at PLT[0] (the lazy resolver).
  write32<ELFT::TargetEndianness>(Buf, Out<ELFT>::Plt->getVA());
}

// %hi(V) adjusted so that adding the sign-extended %lo(V) reconstructs V.
static uint16_t mipsHigh(uint64_t V) { return (V + 0x8000) >> 16; }

// Extract the BSIZE-bit, SHIFT-scaled PC-relative addend encoded in the
// low bits of the instruction at Loc.
template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
static int64_t getPcRelocAddend(const uint8_t *Loc) {
  uint32_t Instr = read32<E>(Loc);
  uint32_t Mask = 0xffffffff >> (32 - BSIZE);
  return SignExtend64<BSIZE + SHIFT>((Instr & Mask) << SHIFT);
}

// Write a BSIZE-bit, SHIFT-scaled PC-relative value V into the
// instruction at Loc, preserving the opcode bits outside the mask.
template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
static void applyMipsPcReloc(uint8_t *Loc, uint32_t Type, uint64_t V) {
  uint32_t Mask = 0xffffffff >> (32 - BSIZE);
  uint32_t Instr = read32<E>(Loc);
  if (SHIFT > 0)
    checkAlignment<(1 << SHIFT)>(V, Type);
  checkInt<BSIZE + SHIFT>(V, Type);
  write32<E>(Loc, (Instr & ~Mask) | ((V >> SHIFT) & Mask));
}

// Patch the 16-bit immediate of the instruction at Loc with %hi(V).
template <endianness E>
static void writeMipsHi16(uint8_t *Loc, uint64_t V) {
  uint32_t Instr = read32<E>(Loc);
  write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(V));
}

// Patch the 16-bit immediate of the instruction at Loc with %lo(V).
template <endianness E>
static void writeMipsLo16(uint8_t *Loc, uint64_t V) {
  uint32_t Instr = read32<E>(Loc);
  write32<E>(Loc, (Instr & 0xffff0000) | (V & 0xffff));
}

template <class ELFT>
void MipsTargetInfo<ELFT>::writePltHeader(uint8_t *Buf) const {
  const endianness E = ELFT::TargetEndianness;
  write32<E>(Buf, 0x3c1c0000);      // lui   $28, %hi(&GOTPLT[0])
  write32<E>(Buf + 4, 0x8f990000);  // lw    $25, %lo(&GOTPLT[0])($28)
  write32<E>(Buf + 8, 0x279c0000);  // addiu $28, $28, %lo(&GOTPLT[0])
  write32<E>(Buf + 12, 0x031cc023); // subu  $24, $24, $28
  write32<E>(Buf + 16, 0x03e07825); // move  $15, $31
  write32<E>(Buf + 20, 0x0018c082); // srl   $24, $24, 2
  write32<E>(Buf + 24, 0x0320f809); // jalr  $25
  write32<E>(Buf + 28, 0x2718fffe); // addiu $24, $24, -2
  uint64_t Got = Out<ELFT>::GotPlt->getVA();
  writeMipsHi16<E>(Buf, Got);
  writeMipsLo16<E>(Buf + 4, Got);
  writeMipsLo16<E>(Buf + 8, Got);
}

template <class ELFT>
void MipsTargetInfo<ELFT>::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                    uint64_t PltEntryAddr, int32_t Index,
                                    unsigned RelOff) const {
  const endianness E = ELFT::TargetEndianness;
  write32<E>(Buf, 0x3c0f0000);      // lui   $15, %hi(.got.plt entry)
  write32<E>(Buf + 4, 0x8df90000);  // l[wd] $25, %lo(.got.plt entry)($15)
  write32<E>(Buf + 8, 0x03200008);  // jr    $25
  write32<E>(Buf + 12, 0x25f80000); // addiu $24, $15, %lo(.got.plt entry)
  writeMipsHi16<E>(Buf, GotEntryAddr);
  writeMipsLo16<E>(Buf + 4,
GotEntryAddr); 1952 writeMipsLo16<E>(Buf + 12, GotEntryAddr); 1953 } 1954 1955 template <class ELFT> 1956 RelExpr MipsTargetInfo<ELFT>::getThunkExpr(RelExpr Expr, uint32_t Type, 1957 const InputFile &File, 1958 const SymbolBody &S) const { 1959 // Any MIPS PIC code function is invoked with its address in register $t9. 1960 // So if we have a branch instruction from non-PIC code to the PIC one 1961 // we cannot make the jump directly and need to create a small stubs 1962 // to save the target function address. 1963 // See page 3-38 ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf 1964 if (Type != R_MIPS_26) 1965 return Expr; 1966 auto *F = dyn_cast<ELFFileBase<ELFT>>(&File); 1967 if (!F) 1968 return Expr; 1969 // If current file has PIC code, LA25 stub is not required. 1970 if (F->getObj().getHeader()->e_flags & EF_MIPS_PIC) 1971 return Expr; 1972 auto *D = dyn_cast<DefinedRegular<ELFT>>(&S); 1973 if (!D || !D->Section) 1974 return Expr; 1975 // LA25 is required if target file has PIC code 1976 // or target symbol is a PIC symbol. 1977 const ELFFile<ELFT> &DefFile = D->Section->getFile()->getObj(); 1978 bool PicFile = DefFile.getHeader()->e_flags & EF_MIPS_PIC; 1979 bool PicSym = (D->StOther & STO_MIPS_MIPS16) == STO_MIPS_PIC; 1980 return (PicFile || PicSym) ? 
R_THUNK_ABS : Expr; 1981 } 1982 1983 template <class ELFT> 1984 uint64_t MipsTargetInfo<ELFT>::getImplicitAddend(const uint8_t *Buf, 1985 uint32_t Type) const { 1986 const endianness E = ELFT::TargetEndianness; 1987 switch (Type) { 1988 default: 1989 return 0; 1990 case R_MIPS_32: 1991 case R_MIPS_GPREL32: 1992 return read32<E>(Buf); 1993 case R_MIPS_26: 1994 // FIXME (simon): If the relocation target symbol is not a PLT entry 1995 // we should use another expression for calculation: 1996 // ((A << 2) | (P & 0xf0000000)) >> 2 1997 return SignExtend64<28>(read32<E>(Buf) << 2); 1998 case R_MIPS_GPREL16: 1999 case R_MIPS_LO16: 2000 case R_MIPS_PCLO16: 2001 case R_MIPS_TLS_DTPREL_HI16: 2002 case R_MIPS_TLS_DTPREL_LO16: 2003 case R_MIPS_TLS_TPREL_HI16: 2004 case R_MIPS_TLS_TPREL_LO16: 2005 return SignExtend64<16>(read32<E>(Buf)); 2006 case R_MIPS_PC16: 2007 return getPcRelocAddend<E, 16, 2>(Buf); 2008 case R_MIPS_PC19_S2: 2009 return getPcRelocAddend<E, 19, 2>(Buf); 2010 case R_MIPS_PC21_S2: 2011 return getPcRelocAddend<E, 21, 2>(Buf); 2012 case R_MIPS_PC26_S2: 2013 return getPcRelocAddend<E, 26, 2>(Buf); 2014 case R_MIPS_PC32: 2015 return getPcRelocAddend<E, 32, 0>(Buf); 2016 } 2017 } 2018 2019 static std::pair<uint32_t, uint64_t> calculateMips64RelChain(uint32_t Type, 2020 uint64_t Val) { 2021 // MIPS N64 ABI packs multiple relocations into the single relocation 2022 // record. In general, all up to three relocations can have arbitrary 2023 // types. In fact, Clang and GCC uses only a few combinations. For now, 2024 // we support two of them. That is allow to pass at least all LLVM 2025 // test suite cases. 2026 // <any relocation> / R_MIPS_SUB / R_MIPS_HI16 | R_MIPS_LO16 2027 // <any relocation> / R_MIPS_64 / R_MIPS_NONE 2028 // The first relocation is a 'real' relocation which is calculated 2029 // using the corresponding symbol's value. 
The second and the third 2030 // relocations used to modify result of the first one: extend it to 2031 // 64-bit, extract high or low part etc. For details, see part 2.9 Relocation 2032 // at the https://dmz-portal.mips.com/mw/images/8/82/007-4658-001.pdf 2033 uint32_t Type2 = (Type >> 8) & 0xff; 2034 uint32_t Type3 = (Type >> 16) & 0xff; 2035 if (Type2 == R_MIPS_NONE && Type3 == R_MIPS_NONE) 2036 return std::make_pair(Type, Val); 2037 if (Type2 == R_MIPS_64 && Type3 == R_MIPS_NONE) 2038 return std::make_pair(Type2, Val); 2039 if (Type2 == R_MIPS_SUB && (Type3 == R_MIPS_HI16 || Type3 == R_MIPS_LO16)) 2040 return std::make_pair(Type3, -Val); 2041 error("unsupported relocations combination " + Twine(Type)); 2042 return std::make_pair(Type & 0xff, Val); 2043 } 2044 2045 template <class ELFT> 2046 void MipsTargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint32_t Type, 2047 uint64_t Val) const { 2048 const endianness E = ELFT::TargetEndianness; 2049 // Thread pointer and DRP offsets from the start of TLS data area. 
2050 // https://www.linux-mips.org/wiki/NPTL 2051 if (Type == R_MIPS_TLS_DTPREL_HI16 || Type == R_MIPS_TLS_DTPREL_LO16) 2052 Val -= 0x8000; 2053 else if (Type == R_MIPS_TLS_TPREL_HI16 || Type == R_MIPS_TLS_TPREL_LO16) 2054 Val -= 0x7000; 2055 if (ELFT::Is64Bits) 2056 std::tie(Type, Val) = calculateMips64RelChain(Type, Val); 2057 switch (Type) { 2058 case R_MIPS_32: 2059 case R_MIPS_GPREL32: 2060 write32<E>(Loc, Val); 2061 break; 2062 case R_MIPS_64: 2063 write64<E>(Loc, Val); 2064 break; 2065 case R_MIPS_26: 2066 write32<E>(Loc, (read32<E>(Loc) & ~0x3ffffff) | (Val >> 2)); 2067 break; 2068 case R_MIPS_GOT_DISP: 2069 case R_MIPS_GOT_PAGE: 2070 case R_MIPS_GOT16: 2071 case R_MIPS_GPREL16: 2072 case R_MIPS_TLS_GD: 2073 case R_MIPS_TLS_LDM: 2074 checkInt<16>(Val, Type); 2075 // fallthrough 2076 case R_MIPS_CALL16: 2077 case R_MIPS_GOT_OFST: 2078 case R_MIPS_LO16: 2079 case R_MIPS_PCLO16: 2080 case R_MIPS_TLS_DTPREL_LO16: 2081 case R_MIPS_TLS_GOTTPREL: 2082 case R_MIPS_TLS_TPREL_LO16: 2083 writeMipsLo16<E>(Loc, Val); 2084 break; 2085 case R_MIPS_HI16: 2086 case R_MIPS_PCHI16: 2087 case R_MIPS_TLS_DTPREL_HI16: 2088 case R_MIPS_TLS_TPREL_HI16: 2089 writeMipsHi16<E>(Loc, Val); 2090 break; 2091 case R_MIPS_JALR: 2092 // Ignore this optimization relocation for now 2093 break; 2094 case R_MIPS_PC16: 2095 applyMipsPcReloc<E, 16, 2>(Loc, Type, Val); 2096 break; 2097 case R_MIPS_PC19_S2: 2098 applyMipsPcReloc<E, 19, 2>(Loc, Type, Val); 2099 break; 2100 case R_MIPS_PC21_S2: 2101 applyMipsPcReloc<E, 21, 2>(Loc, Type, Val); 2102 break; 2103 case R_MIPS_PC26_S2: 2104 applyMipsPcReloc<E, 26, 2>(Loc, Type, Val); 2105 break; 2106 case R_MIPS_PC32: 2107 applyMipsPcReloc<E, 32, 0>(Loc, Type, Val); 2108 break; 2109 default: 2110 fatal("unrecognized reloc " + Twine(Type)); 2111 } 2112 } 2113 2114 template <class ELFT> 2115 bool MipsTargetInfo<ELFT>::usesOnlyLowPageBits(uint32_t Type) const { 2116 return Type == R_MIPS_LO16 || Type == R_MIPS_GOT_OFST; 2117 } 2118 } 2119 } 2120