1 //===-- RuntimeDyldELF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // Implementation of ELF support for the MC-JIT runtime dynamic linker. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "RuntimeDyldELF.h" 15 #include "RuntimeDyldCheckerImpl.h" 16 #include "llvm/ADT/IntervalMap.h" 17 #include "llvm/ADT/STLExtras.h" 18 #include "llvm/ADT/StringRef.h" 19 #include "llvm/ADT/Triple.h" 20 #include "llvm/MC/MCStreamer.h" 21 #include "llvm/Object/ELFObjectFile.h" 22 #include "llvm/Object/ObjectFile.h" 23 #include "llvm/Support/ELF.h" 24 #include "llvm/Support/Endian.h" 25 #include "llvm/Support/MemoryBuffer.h" 26 #include "llvm/Support/TargetRegistry.h" 27 28 using namespace llvm; 29 using namespace llvm::object; 30 31 #define DEBUG_TYPE "dyld" 32 33 static inline std::error_code check(std::error_code Err) { 34 if (Err) { 35 report_fatal_error(Err.message()); 36 } 37 return Err; 38 } 39 40 namespace { 41 42 template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> { 43 LLVM_ELF_IMPORT_TYPES_ELFT(ELFT) 44 45 typedef Elf_Shdr_Impl<ELFT> Elf_Shdr; 46 typedef Elf_Sym_Impl<ELFT> Elf_Sym; 47 typedef Elf_Rel_Impl<ELFT, false> Elf_Rel; 48 typedef Elf_Rel_Impl<ELFT, true> Elf_Rela; 49 50 typedef Elf_Ehdr_Impl<ELFT> Elf_Ehdr; 51 52 typedef typename ELFDataTypeTypedefHelper<ELFT>::value_type addr_type; 53 54 public: 55 DyldELFObject(MemoryBufferRef Wrapper, std::error_code &ec); 56 57 void updateSectionAddress(const SectionRef &Sec, uint64_t Addr); 58 59 void updateSymbolAddress(const SymbolRef &SymRef, uint64_t Addr); 60 61 // Methods for type inquiry through isa, cast and dyn_cast 62 static inline bool classof(const Binary *v) { 63 return (isa<ELFObjectFile<ELFT>>(v) && 64 classof(cast<ELFObjectFile<ELFT>>(v))); 65 } 66 static inline bool classof(const ELFObjectFile<ELFT> *v) { 67 return v->isDyldType(); 68 } 69 70 }; 71 72 73 74 // The MemoryBuffer passed into this constructor is just a wrapper around the 75 // actual memory. Ultimately, the Binary parent class will take ownership of 76 // this MemoryBuffer object but not the underlying memory. 77 template <class ELFT> 78 DyldELFObject<ELFT>::DyldELFObject(MemoryBufferRef Wrapper, std::error_code &EC) 79 : ELFObjectFile<ELFT>(Wrapper, EC) { 80 this->isDyldELFObject = true; 81 } 82 83 template <class ELFT> 84 void DyldELFObject<ELFT>::updateSectionAddress(const SectionRef &Sec, 85 uint64_t Addr) { 86 DataRefImpl ShdrRef = Sec.getRawDataRefImpl(); 87 Elf_Shdr *shdr = 88 const_cast<Elf_Shdr *>(reinterpret_cast<const Elf_Shdr *>(ShdrRef.p)); 89 90 // This assumes the address passed in matches the target address bitness 91 // The template-based type cast handles everything else. 92 shdr->sh_addr = static_cast<addr_type>(Addr); 93 } 94 95 template <class ELFT> 96 void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef, 97 uint64_t Addr) { 98 99 Elf_Sym *sym = const_cast<Elf_Sym *>( 100 ELFObjectFile<ELFT>::getSymbol(SymRef.getRawDataRefImpl())); 101 102 // This assumes the address passed in matches the target address bitness 103 // The template-based type cast handles everything else. 
104 sym->st_value = static_cast<addr_type>(Addr); 105 } 106 107 class LoadedELFObjectInfo 108 : public RuntimeDyld::LoadedObjectInfoHelper<LoadedELFObjectInfo> { 109 public: 110 LoadedELFObjectInfo(RuntimeDyldImpl &RTDyld, unsigned BeginIdx, 111 unsigned EndIdx) 112 : LoadedObjectInfoHelper(RTDyld, BeginIdx, EndIdx) {} 113 114 OwningBinary<ObjectFile> 115 getObjectForDebug(const ObjectFile &Obj) const override; 116 }; 117 118 template <typename ELFT> 119 std::unique_ptr<DyldELFObject<ELFT>> 120 createRTDyldELFObject(MemoryBufferRef Buffer, 121 const LoadedELFObjectInfo &L, 122 std::error_code &ec) { 123 typedef typename ELFFile<ELFT>::Elf_Shdr Elf_Shdr; 124 typedef typename ELFDataTypeTypedefHelper<ELFT>::value_type addr_type; 125 126 std::unique_ptr<DyldELFObject<ELFT>> Obj = 127 llvm::make_unique<DyldELFObject<ELFT>>(Buffer, ec); 128 129 // Iterate over all sections in the object. 130 for (const auto &Sec : Obj->sections()) { 131 StringRef SectionName; 132 Sec.getName(SectionName); 133 if (SectionName != "") { 134 DataRefImpl ShdrRef = Sec.getRawDataRefImpl(); 135 Elf_Shdr *shdr = const_cast<Elf_Shdr *>( 136 reinterpret_cast<const Elf_Shdr *>(ShdrRef.p)); 137 138 if (uint64_t SecLoadAddr = L.getSectionLoadAddress(SectionName)) { 139 // This assumes that the address passed in matches the target address 140 // bitness. The template-based type cast handles everything else. 141 shdr->sh_addr = static_cast<addr_type>(SecLoadAddr); 142 } 143 } 144 } 145 146 return Obj; 147 } 148 149 OwningBinary<ObjectFile> createELFDebugObject(const ObjectFile &Obj, 150 const LoadedELFObjectInfo &L) { 151 assert(Obj.isELF() && "Not an ELF object file."); 152 153 std::unique_ptr<MemoryBuffer> Buffer = 154 MemoryBuffer::getMemBufferCopy(Obj.getData(), Obj.getFileName()); 155 156 std::error_code ec; 157 158 std::unique_ptr<ObjectFile> DebugObj; 159 if (Obj.getBytesInAddress() == 4 && Obj.isLittleEndian()) { 160 typedef ELFType<support::little, false> ELF32LE; 161 DebugObj = createRTDyldELFObject<ELF32LE>(Buffer->getMemBufferRef(), L, ec); 162 } else if (Obj.getBytesInAddress() == 4 && !Obj.isLittleEndian()) { 163 typedef ELFType<support::big, false> ELF32BE; 164 DebugObj = createRTDyldELFObject<ELF32BE>(Buffer->getMemBufferRef(), L, ec); 165 } else if (Obj.getBytesInAddress() == 8 && !Obj.isLittleEndian()) { 166 typedef ELFType<support::big, true> ELF64BE; 167 DebugObj = createRTDyldELFObject<ELF64BE>(Buffer->getMemBufferRef(), L, ec); 168 } else if (Obj.getBytesInAddress() == 8 && Obj.isLittleEndian()) { 169 typedef ELFType<support::little, true> ELF64LE; 170 DebugObj = createRTDyldELFObject<ELF64LE>(Buffer->getMemBufferRef(), L, ec); 171 } else 172 llvm_unreachable("Unexpected ELF format"); 173 174 assert(!ec && "Could not construct copy ELF object file"); 175 176 return OwningBinary<ObjectFile>(std::move(DebugObj), std::move(Buffer)); 177 } 178 179 OwningBinary<ObjectFile> 180 LoadedELFObjectInfo::getObjectForDebug(const ObjectFile &Obj) const { 181 return createELFDebugObject(Obj, *this); 182 } 183 184 } // namespace 185 186 namespace llvm { 187 188 RuntimeDyldELF::RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr, 189 RuntimeDyld::SymbolResolver &Resolver) 190 : RuntimeDyldImpl(MemMgr, Resolver), GOTSectionID(0), CurrentGOTIndex(0) {} 191 RuntimeDyldELF::~RuntimeDyldELF() {} 192 193 void RuntimeDyldELF::registerEHFrames() { 194 for (int i = 0, e = UnregisteredEHFrameSections.size(); i != e; ++i) { 195 SID EHFrameSID = UnregisteredEHFrameSections[i]; 196 uint8_t *EHFrameAddr = Sections[EHFrameSID].Address; 
197 uint64_t EHFrameLoadAddr = Sections[EHFrameSID].LoadAddress; 198 size_t EHFrameSize = Sections[EHFrameSID].Size; 199 MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize); 200 RegisteredEHFrameSections.push_back(EHFrameSID); 201 } 202 UnregisteredEHFrameSections.clear(); 203 } 204 205 void RuntimeDyldELF::deregisterEHFrames() { 206 for (int i = 0, e = RegisteredEHFrameSections.size(); i != e; ++i) { 207 SID EHFrameSID = RegisteredEHFrameSections[i]; 208 uint8_t *EHFrameAddr = Sections[EHFrameSID].Address; 209 uint64_t EHFrameLoadAddr = Sections[EHFrameSID].LoadAddress; 210 size_t EHFrameSize = Sections[EHFrameSID].Size; 211 MemMgr.deregisterEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize); 212 } 213 RegisteredEHFrameSections.clear(); 214 } 215 216 std::unique_ptr<RuntimeDyld::LoadedObjectInfo> 217 RuntimeDyldELF::loadObject(const object::ObjectFile &O) { 218 unsigned SectionStartIdx, SectionEndIdx; 219 std::tie(SectionStartIdx, SectionEndIdx) = loadObjectImpl(O); 220 return llvm::make_unique<LoadedELFObjectInfo>(*this, SectionStartIdx, 221 SectionEndIdx); 222 } 223 224 void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section, 225 uint64_t Offset, uint64_t Value, 226 uint32_t Type, int64_t Addend, 227 uint64_t SymOffset) { 228 switch (Type) { 229 default: 230 llvm_unreachable("Relocation type not implemented yet!"); 231 break; 232 case ELF::R_X86_64_64: { 233 support::ulittle64_t::ref(Section.Address + Offset) = Value + Addend; 234 DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at " 235 << format("%p\n", Section.Address + Offset)); 236 break; 237 } 238 case ELF::R_X86_64_32: 239 case ELF::R_X86_64_32S: { 240 Value += Addend; 241 assert((Type == ELF::R_X86_64_32 && (Value <= UINT32_MAX)) || 242 (Type == ELF::R_X86_64_32S && 243 ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN))); 244 uint32_t TruncatedAddr = (Value & 0xFFFFFFFF); 245 support::ulittle32_t::ref(Section.Address + Offset) = TruncatedAddr; 246 DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at " 247 << format("%p\n", Section.Address + Offset)); 248 break; 249 } 250 case ELF::R_X86_64_PC32: { 251 uint64_t FinalAddress = Section.LoadAddress + Offset; 252 int64_t RealOffset = Value + Addend - FinalAddress; 253 assert(isInt<32>(RealOffset)); 254 int32_t TruncOffset = (RealOffset & 0xFFFFFFFF); 255 support::ulittle32_t::ref(Section.Address + Offset) = TruncOffset; 256 break; 257 } 258 case ELF::R_X86_64_PC64: { 259 uint64_t FinalAddress = Section.LoadAddress + Offset; 260 int64_t RealOffset = Value + Addend - FinalAddress; 261 support::ulittle64_t::ref(Section.Address + Offset) = RealOffset; 262 break; 263 } 264 } 265 } 266 267 void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section, 268 uint64_t Offset, uint32_t Value, 269 uint32_t Type, int32_t Addend) { 270 switch (Type) { 271 case ELF::R_386_32: { 272 support::ulittle32_t::ref(Section.Address + Offset) = Value + Addend; 273 break; 274 } 275 case ELF::R_386_PC32: { 276 uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF); 277 uint32_t RealOffset = Value + Addend - FinalAddress; 278 support::ulittle32_t::ref(Section.Address + Offset) = RealOffset; 279 break; 280 } 281 default: 282 // There are other relocation types, but it appears these are the 283 // only ones currently used by the LLVM ELF object writer 284 llvm_unreachable("Relocation type not implemented yet!"); 285 break; 286 } 287 } 288 289 void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section, 
290 uint64_t Offset, uint64_t Value, 291 uint32_t Type, int64_t Addend) { 292 uint32_t *TargetPtr = reinterpret_cast<uint32_t *>(Section.Address + Offset); 293 uint64_t FinalAddress = Section.LoadAddress + Offset; 294 295 DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x" 296 << format("%llx", Section.Address + Offset) 297 << " FinalAddress: 0x" << format("%llx", FinalAddress) 298 << " Value: 0x" << format("%llx", Value) << " Type: 0x" 299 << format("%x", Type) << " Addend: 0x" << format("%llx", Addend) 300 << "\n"); 301 302 switch (Type) { 303 default: 304 llvm_unreachable("Relocation type not implemented yet!"); 305 break; 306 case ELF::R_AARCH64_ABS64: { 307 uint64_t *TargetPtr = 308 reinterpret_cast<uint64_t *>(Section.Address + Offset); 309 *TargetPtr = Value + Addend; 310 break; 311 } 312 case ELF::R_AARCH64_PREL32: { 313 uint64_t Result = Value + Addend - FinalAddress; 314 assert(static_cast<int64_t>(Result) >= INT32_MIN && 315 static_cast<int64_t>(Result) <= UINT32_MAX); 316 *TargetPtr = static_cast<uint32_t>(Result & 0xffffffffU); 317 break; 318 } 319 case ELF::R_AARCH64_CALL26: // fallthrough 320 case ELF::R_AARCH64_JUMP26: { 321 // Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the 322 // calculation. 323 uint64_t BranchImm = Value + Addend - FinalAddress; 324 325 // "Check that -2^27 <= result < 2^27". 326 assert(isInt<28>(BranchImm)); 327 328 // AArch64 code is emitted with .rela relocations. The data already in any 329 // bits affected by the relocation on entry is garbage. 330 *TargetPtr &= 0xfc000000U; 331 // Immediate goes in bits 25:0 of B and BL. 332 *TargetPtr |= static_cast<uint32_t>(BranchImm & 0xffffffcU) >> 2; 333 break; 334 } 335 case ELF::R_AARCH64_MOVW_UABS_G3: { 336 uint64_t Result = Value + Addend; 337 338 // AArch64 code is emitted with .rela relocations. The data already in any 339 // bits affected by the relocation on entry is garbage. 340 *TargetPtr &= 0xffe0001fU; 341 // Immediate goes in bits 20:5 of MOVZ/MOVK instruction 342 *TargetPtr |= Result >> (48 - 5); 343 // Shift must be "lsl #48", in bits 22:21 344 assert((*TargetPtr >> 21 & 0x3) == 3 && "invalid shift for relocation"); 345 break; 346 } 347 case ELF::R_AARCH64_MOVW_UABS_G2_NC: { 348 uint64_t Result = Value + Addend; 349 350 // AArch64 code is emitted with .rela relocations. The data already in any 351 // bits affected by the relocation on entry is garbage. 352 *TargetPtr &= 0xffe0001fU; 353 // Immediate goes in bits 20:5 of MOVZ/MOVK instruction 354 *TargetPtr |= ((Result & 0xffff00000000ULL) >> (32 - 5)); 355 // Shift must be "lsl #32", in bits 22:21 356 assert((*TargetPtr >> 21 & 0x3) == 2 && "invalid shift for relocation"); 357 break; 358 } 359 case ELF::R_AARCH64_MOVW_UABS_G1_NC: { 360 uint64_t Result = Value + Addend; 361 362 // AArch64 code is emitted with .rela relocations. The data already in any 363 // bits affected by the relocation on entry is garbage. 364 *TargetPtr &= 0xffe0001fU; 365 // Immediate goes in bits 20:5 of MOVZ/MOVK instruction 366 *TargetPtr |= ((Result & 0xffff0000U) >> (16 - 5)); 367 // Shift must be "lsl #16", in bits 22:2 368 assert((*TargetPtr >> 21 & 0x3) == 1 && "invalid shift for relocation"); 369 break; 370 } 371 case ELF::R_AARCH64_MOVW_UABS_G0_NC: { 372 uint64_t Result = Value + Addend; 373 374 // AArch64 code is emitted with .rela relocations. The data already in any 375 // bits affected by the relocation on entry is garbage. 
376 *TargetPtr &= 0xffe0001fU; 377 // Immediate goes in bits 20:5 of MOVZ/MOVK instruction 378 *TargetPtr |= ((Result & 0xffffU) << 5); 379 // Shift must be "lsl #0", in bits 22:21. 380 assert((*TargetPtr >> 21 & 0x3) == 0 && "invalid shift for relocation"); 381 break; 382 } 383 case ELF::R_AARCH64_ADR_PREL_PG_HI21: { 384 // Operation: Page(S+A) - Page(P) 385 uint64_t Result = 386 ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL); 387 388 // Check that -2^32 <= X < 2^32 389 assert(isInt<33>(Result) && "overflow check failed for relocation"); 390 391 // AArch64 code is emitted with .rela relocations. The data already in any 392 // bits affected by the relocation on entry is garbage. 393 *TargetPtr &= 0x9f00001fU; 394 // Immediate goes in bits 30:29 + 5:23 of ADRP instruction, taken 395 // from bits 32:12 of X. 396 *TargetPtr |= ((Result & 0x3000U) << (29 - 12)); 397 *TargetPtr |= ((Result & 0x1ffffc000ULL) >> (14 - 5)); 398 break; 399 } 400 case ELF::R_AARCH64_LDST32_ABS_LO12_NC: { 401 // Operation: S + A 402 uint64_t Result = Value + Addend; 403 404 // AArch64 code is emitted with .rela relocations. The data already in any 405 // bits affected by the relocation on entry is garbage. 406 *TargetPtr &= 0xffc003ffU; 407 // Immediate goes in bits 21:10 of LD/ST instruction, taken 408 // from bits 11:2 of X 409 *TargetPtr |= ((Result & 0xffc) << (10 - 2)); 410 break; 411 } 412 case ELF::R_AARCH64_LDST64_ABS_LO12_NC: { 413 // Operation: S + A 414 uint64_t Result = Value + Addend; 415 416 // AArch64 code is emitted with .rela relocations. The data already in any 417 // bits affected by the relocation on entry is garbage. 418 *TargetPtr &= 0xffc003ffU; 419 // Immediate goes in bits 21:10 of LD/ST instruction, taken 420 // from bits 11:3 of X 421 *TargetPtr |= ((Result & 0xff8) << (10 - 3)); 422 break; 423 } 424 } 425 } 426 427 void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section, 428 uint64_t Offset, uint32_t Value, 429 uint32_t Type, int32_t Addend) { 430 // TODO: Add Thumb relocations. 431 uint32_t *TargetPtr = (uint32_t *)(Section.Address + Offset); 432 uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF); 433 Value += Addend; 434 435 DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: " 436 << Section.Address + Offset 437 << " FinalAddress: " << format("%p", FinalAddress) << " Value: " 438 << format("%x", Value) << " Type: " << format("%x", Type) 439 << " Addend: " << format("%x", Addend) << "\n"); 440 441 switch (Type) { 442 default: 443 llvm_unreachable("Not implemented relocation type!"); 444 445 case ELF::R_ARM_NONE: 446 break; 447 case ELF::R_ARM_PREL31: 448 case ELF::R_ARM_TARGET1: 449 case ELF::R_ARM_ABS32: 450 *TargetPtr = Value; 451 break; 452 // Write first 16 bit of 32 bit value to the mov instruction. 453 // Last 4 bit should be shifted. 454 case ELF::R_ARM_MOVW_ABS_NC: 455 case ELF::R_ARM_MOVT_ABS: 456 if (Type == ELF::R_ARM_MOVW_ABS_NC) 457 Value = Value & 0xFFFF; 458 else if (Type == ELF::R_ARM_MOVT_ABS) 459 Value = (Value >> 16) & 0xFFFF; 460 *TargetPtr &= ~0x000F0FFF; 461 *TargetPtr |= Value & 0xFFF; 462 *TargetPtr |= ((Value >> 12) & 0xF) << 16; 463 break; 464 // Write 24 bit relative value to the branch instruction. 465 case ELF::R_ARM_PC24: // Fall through. 466 case ELF::R_ARM_CALL: // Fall through. 
467 case ELF::R_ARM_JUMP24: 468 int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8); 469 RelValue = (RelValue & 0x03FFFFFC) >> 2; 470 assert((*TargetPtr & 0xFFFFFF) == 0xFFFFFE); 471 *TargetPtr &= 0xFF000000; 472 *TargetPtr |= RelValue; 473 break; 474 } 475 } 476 477 void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section, 478 uint64_t Offset, uint32_t Value, 479 uint32_t Type, int32_t Addend) { 480 uint8_t *TargetPtr = Section.Address + Offset; 481 Value += Addend; 482 483 DEBUG(dbgs() << "resolveMIPSRelocation, LocalAddress: " 484 << Section.Address + Offset << " FinalAddress: " 485 << format("%p", Section.LoadAddress + Offset) << " Value: " 486 << format("%x", Value) << " Type: " << format("%x", Type) 487 << " Addend: " << format("%x", Addend) << "\n"); 488 489 uint32_t Insn = readBytesUnaligned(TargetPtr, 4); 490 491 switch (Type) { 492 default: 493 llvm_unreachable("Not implemented relocation type!"); 494 break; 495 case ELF::R_MIPS_32: 496 writeBytesUnaligned(Value, TargetPtr, 4); 497 break; 498 case ELF::R_MIPS_26: 499 Insn &= 0xfc000000; 500 Insn |= (Value & 0x0fffffff) >> 2; 501 writeBytesUnaligned(Insn, TargetPtr, 4); 502 break; 503 case ELF::R_MIPS_HI16: 504 // Get the higher 16-bits. Also add 1 if bit 15 is 1. 505 Insn &= 0xffff0000; 506 Insn |= ((Value + 0x8000) >> 16) & 0xffff; 507 writeBytesUnaligned(Insn, TargetPtr, 4); 508 break; 509 case ELF::R_MIPS_LO16: 510 Insn &= 0xffff0000; 511 Insn |= Value & 0xffff; 512 writeBytesUnaligned(Insn, TargetPtr, 4); 513 break; 514 case ELF::R_MIPS_PC32: { 515 uint32_t FinalAddress = (Section.LoadAddress + Offset); 516 writeBytesUnaligned(Value - FinalAddress, (uint8_t *)TargetPtr, 4); 517 break; 518 } 519 case ELF::R_MIPS_PC16: { 520 uint32_t FinalAddress = (Section.LoadAddress + Offset); 521 Insn &= 0xffff0000; 522 Insn |= ((Value - FinalAddress) >> 2) & 0xffff; 523 writeBytesUnaligned(Insn, TargetPtr, 4); 524 break; 525 } 526 case ELF::R_MIPS_PC19_S2: { 527 uint32_t FinalAddress = (Section.LoadAddress + Offset); 528 Insn &= 0xfff80000; 529 Insn |= ((Value - (FinalAddress & ~0x3)) >> 2) & 0x7ffff; 530 writeBytesUnaligned(Insn, TargetPtr, 4); 531 break; 532 } 533 case ELF::R_MIPS_PC21_S2: { 534 uint32_t FinalAddress = (Section.LoadAddress + Offset); 535 Insn &= 0xffe00000; 536 Insn |= ((Value - FinalAddress) >> 2) & 0x1fffff; 537 writeBytesUnaligned(Insn, TargetPtr, 4); 538 break; 539 } 540 case ELF::R_MIPS_PC26_S2: { 541 uint32_t FinalAddress = (Section.LoadAddress + Offset); 542 Insn &= 0xfc000000; 543 Insn |= ((Value - FinalAddress) >> 2) & 0x3ffffff; 544 writeBytesUnaligned(Insn, TargetPtr, 4); 545 break; 546 } 547 case ELF::R_MIPS_PCHI16: { 548 uint32_t FinalAddress = (Section.LoadAddress + Offset); 549 Insn &= 0xffff0000; 550 Insn |= ((Value - FinalAddress + 0x8000) >> 16) & 0xffff; 551 writeBytesUnaligned(Insn, TargetPtr, 4); 552 break; 553 } 554 case ELF::R_MIPS_PCLO16: { 555 uint32_t FinalAddress = (Section.LoadAddress + Offset); 556 Insn &= 0xffff0000; 557 Insn |= (Value - FinalAddress) & 0xffff; 558 writeBytesUnaligned(Insn, TargetPtr, 4); 559 break; 560 } 561 } 562 } 563 564 void RuntimeDyldELF::setMipsABI(const ObjectFile &Obj) { 565 if (Arch == Triple::UnknownArch || 566 !StringRef(Triple::getArchTypePrefix(Arch)).equals("mips")) { 567 IsMipsO32ABI = false; 568 IsMipsN64ABI = false; 569 return; 570 } 571 unsigned AbiVariant; 572 Obj.getPlatformFlags(AbiVariant); 573 IsMipsO32ABI = AbiVariant & ELF::EF_MIPS_ABI_O32; 574 IsMipsN64ABI = Obj.getFileFormatName().equals("ELF64-mips"); 575 if 
(AbiVariant & ELF::EF_MIPS_ABI2) 576 llvm_unreachable("Mips N32 ABI is not supported yet"); 577 } 578 579 void RuntimeDyldELF::resolveMIPS64Relocation(const SectionEntry &Section, 580 uint64_t Offset, uint64_t Value, 581 uint32_t Type, int64_t Addend, 582 uint64_t SymOffset, 583 SID SectionID) { 584 uint32_t r_type = Type & 0xff; 585 uint32_t r_type2 = (Type >> 8) & 0xff; 586 uint32_t r_type3 = (Type >> 16) & 0xff; 587 588 // RelType is used to keep information for which relocation type we are 589 // applying relocation. 590 uint32_t RelType = r_type; 591 int64_t CalculatedValue = evaluateMIPS64Relocation(Section, Offset, Value, 592 RelType, Addend, 593 SymOffset, SectionID); 594 if (r_type2 != ELF::R_MIPS_NONE) { 595 RelType = r_type2; 596 CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType, 597 CalculatedValue, SymOffset, 598 SectionID); 599 } 600 if (r_type3 != ELF::R_MIPS_NONE) { 601 RelType = r_type3; 602 CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType, 603 CalculatedValue, SymOffset, 604 SectionID); 605 } 606 applyMIPS64Relocation(Section.Address + Offset, CalculatedValue, RelType); 607 } 608 609 int64_t 610 RuntimeDyldELF::evaluateMIPS64Relocation(const SectionEntry &Section, 611 uint64_t Offset, uint64_t Value, 612 uint32_t Type, int64_t Addend, 613 uint64_t SymOffset, SID SectionID) { 614 615 DEBUG(dbgs() << "evaluateMIPS64Relocation, LocalAddress: 0x" 616 << format("%llx", Section.Address + Offset) 617 << " FinalAddress: 0x" 618 << format("%llx", Section.LoadAddress + Offset) 619 << " Value: 0x" << format("%llx", Value) << " Type: 0x" 620 << format("%x", Type) << " Addend: 0x" << format("%llx", Addend) 621 << " SymOffset: " << format("%x", SymOffset) 622 << "\n"); 623 624 switch (Type) { 625 default: 626 llvm_unreachable("Not implemented relocation type!"); 627 break; 628 case ELF::R_MIPS_JALR: 629 case ELF::R_MIPS_NONE: 630 break; 631 case ELF::R_MIPS_32: 632 case ELF::R_MIPS_64: 633 return Value + Addend; 634 case ELF::R_MIPS_26: 635 return ((Value + Addend) >> 2) & 0x3ffffff; 636 case ELF::R_MIPS_GPREL16: { 637 uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]); 638 return Value + Addend - (GOTAddr + 0x7ff0); 639 } 640 case ELF::R_MIPS_SUB: 641 return Value - Addend; 642 case ELF::R_MIPS_HI16: 643 // Get the higher 16-bits. Also add 1 if bit 15 is 1. 
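    // For example: if Value + Addend is 0x12348000, the paired %lo is 0x8000,
    // which sign-extends to -0x8000 when the low half is applied; adding
    // 0x8000 before shifting yields %hi = 0x1235, so (%hi << 16) plus the
    // sign-extended %lo reconstructs 0x12348000.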
644 return ((Value + Addend + 0x8000) >> 16) & 0xffff; 645 case ELF::R_MIPS_LO16: 646 return (Value + Addend) & 0xffff; 647 case ELF::R_MIPS_CALL16: 648 case ELF::R_MIPS_GOT_DISP: 649 case ELF::R_MIPS_GOT_PAGE: { 650 uint8_t *LocalGOTAddr = 651 getSectionAddress(SectionToGOTMap[SectionID]) + SymOffset; 652 uint64_t GOTEntry = readBytesUnaligned(LocalGOTAddr, 8); 653 654 Value += Addend; 655 if (Type == ELF::R_MIPS_GOT_PAGE) 656 Value = (Value + 0x8000) & ~0xffff; 657 658 if (GOTEntry) 659 assert(GOTEntry == Value && 660 "GOT entry has two different addresses."); 661 else 662 writeBytesUnaligned(Value, LocalGOTAddr, 8); 663 664 return (SymOffset - 0x7ff0) & 0xffff; 665 } 666 case ELF::R_MIPS_GOT_OFST: { 667 int64_t page = (Value + Addend + 0x8000) & ~0xffff; 668 return (Value + Addend - page) & 0xffff; 669 } 670 case ELF::R_MIPS_GPREL32: { 671 uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]); 672 return Value + Addend - (GOTAddr + 0x7ff0); 673 } 674 case ELF::R_MIPS_PC16: { 675 uint64_t FinalAddress = (Section.LoadAddress + Offset); 676 return ((Value + Addend - FinalAddress) >> 2) & 0xffff; 677 } 678 case ELF::R_MIPS_PC32: { 679 uint64_t FinalAddress = (Section.LoadAddress + Offset); 680 return Value + Addend - FinalAddress; 681 } 682 case ELF::R_MIPS_PC18_S3: { 683 uint64_t FinalAddress = (Section.LoadAddress + Offset); 684 return ((Value + Addend - ((FinalAddress | 7) ^ 7)) >> 3) & 0x3ffff; 685 } 686 case ELF::R_MIPS_PC19_S2: { 687 uint64_t FinalAddress = (Section.LoadAddress + Offset); 688 return ((Value + Addend - FinalAddress) >> 2) & 0x7ffff; 689 } 690 case ELF::R_MIPS_PC21_S2: { 691 uint64_t FinalAddress = (Section.LoadAddress + Offset); 692 return ((Value + Addend - FinalAddress) >> 2) & 0x1fffff; 693 } 694 case ELF::R_MIPS_PC26_S2: { 695 uint64_t FinalAddress = (Section.LoadAddress + Offset); 696 return ((Value + Addend - FinalAddress) >> 2) & 0x3ffffff; 697 } 698 case ELF::R_MIPS_PCHI16: { 699 uint64_t FinalAddress = (Section.LoadAddress + Offset); 700 return ((Value + Addend - FinalAddress + 0x8000) >> 16) & 0xffff; 701 } 702 case ELF::R_MIPS_PCLO16: { 703 uint64_t FinalAddress = (Section.LoadAddress + Offset); 704 return (Value + Addend - FinalAddress) & 0xffff; 705 } 706 } 707 return 0; 708 } 709 710 void RuntimeDyldELF::applyMIPS64Relocation(uint8_t *TargetPtr, 711 int64_t CalculatedValue, 712 uint32_t Type) { 713 uint32_t Insn = readBytesUnaligned(TargetPtr, 4); 714 715 switch (Type) { 716 default: 717 break; 718 case ELF::R_MIPS_32: 719 case ELF::R_MIPS_GPREL32: 720 case ELF::R_MIPS_PC32: 721 writeBytesUnaligned(CalculatedValue & 0xffffffff, TargetPtr, 4); 722 break; 723 case ELF::R_MIPS_64: 724 case ELF::R_MIPS_SUB: 725 writeBytesUnaligned(CalculatedValue, TargetPtr, 8); 726 break; 727 case ELF::R_MIPS_26: 728 case ELF::R_MIPS_PC26_S2: 729 Insn = (Insn & 0xfc000000) | CalculatedValue; 730 writeBytesUnaligned(Insn, TargetPtr, 4); 731 break; 732 case ELF::R_MIPS_GPREL16: 733 Insn = (Insn & 0xffff0000) | (CalculatedValue & 0xffff); 734 writeBytesUnaligned(Insn, TargetPtr, 4); 735 break; 736 case ELF::R_MIPS_HI16: 737 case ELF::R_MIPS_LO16: 738 case ELF::R_MIPS_PCHI16: 739 case ELF::R_MIPS_PCLO16: 740 case ELF::R_MIPS_PC16: 741 case ELF::R_MIPS_CALL16: 742 case ELF::R_MIPS_GOT_DISP: 743 case ELF::R_MIPS_GOT_PAGE: 744 case ELF::R_MIPS_GOT_OFST: 745 Insn = (Insn & 0xffff0000) | CalculatedValue; 746 writeBytesUnaligned(Insn, TargetPtr, 4); 747 break; 748 case ELF::R_MIPS_PC18_S3: 749 Insn = (Insn & 0xfffc0000) | CalculatedValue; 750 writeBytesUnaligned(Insn, 
TargetPtr, 4);
    break;
  case ELF::R_MIPS_PC19_S2:
    Insn = (Insn & 0xfff80000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_PC21_S2:
    Insn = (Insn & 0xffe00000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  }
}

// Return the .TOC. section and offset.
void RuntimeDyldELF::findPPC64TOCSection(const ELFObjectFileBase &Obj,
                                         ObjSectionToIDMap &LocalSections,
                                         RelocationValueRef &Rel) {
  // Set a default SectionID in case we do not find a TOC section below.
  // This may happen for references to the TOC base (sym@toc, .opd
  // relocation) without a .toc directive. In this case just use the
  // first section (which is usually the .opd) since the code won't
  // reference the .toc base directly.
  Rel.SymbolName = NULL;
  Rel.SectionID = 0;

  // The TOC consists of sections .got, .toc, .tocbss, .plt in that
  // order. The TOC starts where the first of these sections starts.
  for (auto &Section : Obj.sections()) {
    StringRef SectionName;
    check(Section.getName(SectionName));

    if (SectionName == ".got"
        || SectionName == ".toc"
        || SectionName == ".tocbss"
        || SectionName == ".plt") {
      Rel.SectionID = findOrEmitSection(Obj, Section, false, LocalSections);
      break;
    }
  }

  // Per the ppc64-elf-linux ABI, the TOC base is the TOC value plus 0x8000,
  // thus permitting a full 64 Kbytes segment.
  Rel.Addend = 0x8000;
}

// Returns the section and offset associated with the OPD entry referenced
// by Symbol.
void RuntimeDyldELF::findOPDEntrySection(const ELFObjectFileBase &Obj,
                                         ObjSectionToIDMap &LocalSections,
                                         RelocationValueRef &Rel) {
  // Get the ELF symbol value (st_value) to compare with the relocation offset
  // in .opd entries.
  for (section_iterator si = Obj.section_begin(), se = Obj.section_end();
       si != se; ++si) {
    section_iterator RelSecI = si->getRelocatedSection();
    if (RelSecI == Obj.section_end())
      continue;

    StringRef RelSectionName;
    check(RelSecI->getName(RelSectionName));
    if (RelSectionName != ".opd")
      continue;

    for (elf_relocation_iterator i = si->relocation_begin(),
                                 e = si->relocation_end();
         i != e;) {
      // The R_PPC64_ADDR64 relocation indicates the first field
      // of an .opd entry.
      uint64_t TypeFunc = i->getType();
      if (TypeFunc != ELF::R_PPC64_ADDR64) {
        ++i;
        continue;
      }

      uint64_t TargetSymbolOffset = i->getOffset();
      symbol_iterator TargetSymbol = i->getSymbol();
      ErrorOr<int64_t> AddendOrErr = i->getAddend();
      check(AddendOrErr.getError());
      int64_t Addend = *AddendOrErr;

      ++i;
      if (i == e)
        break;

      // Just check whether the following relocation is an R_PPC64_TOC.
      uint64_t TypeTOC = i->getType();
      if (TypeTOC != ELF::R_PPC64_TOC)
        continue;

      // Finally, compare the symbol value and the target symbol offset
      // to check whether this .opd entry refers to the symbol the relocation
      // points to.
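      // (For reference: each .opd entry is an ELFv1 function descriptor whose
      // first doubleword, covered by the R_PPC64_ADDR64 relocation above,
      // holds the function's entry point, and whose second doubleword,
      // covered by the R_PPC64_TOC relocation, holds its TOC base.)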
842 if (Rel.Addend != (int64_t)TargetSymbolOffset) 843 continue; 844 845 section_iterator tsi(Obj.section_end()); 846 check(TargetSymbol->getSection(tsi)); 847 bool IsCode = tsi->isText(); 848 Rel.SectionID = findOrEmitSection(Obj, (*tsi), IsCode, LocalSections); 849 Rel.Addend = (intptr_t)Addend; 850 return; 851 } 852 } 853 llvm_unreachable("Attempting to get address of ODP entry!"); 854 } 855 856 // Relocation masks following the #lo(value), #hi(value), #ha(value), 857 // #higher(value), #highera(value), #highest(value), and #highesta(value) 858 // macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi 859 // document. 860 861 static inline uint16_t applyPPClo(uint64_t value) { return value & 0xffff; } 862 863 static inline uint16_t applyPPChi(uint64_t value) { 864 return (value >> 16) & 0xffff; 865 } 866 867 static inline uint16_t applyPPCha (uint64_t value) { 868 return ((value + 0x8000) >> 16) & 0xffff; 869 } 870 871 static inline uint16_t applyPPChigher(uint64_t value) { 872 return (value >> 32) & 0xffff; 873 } 874 875 static inline uint16_t applyPPChighera (uint64_t value) { 876 return ((value + 0x8000) >> 32) & 0xffff; 877 } 878 879 static inline uint16_t applyPPChighest(uint64_t value) { 880 return (value >> 48) & 0xffff; 881 } 882 883 static inline uint16_t applyPPChighesta (uint64_t value) { 884 return ((value + 0x8000) >> 48) & 0xffff; 885 } 886 887 void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section, 888 uint64_t Offset, uint64_t Value, 889 uint32_t Type, int64_t Addend) { 890 uint8_t *LocalAddress = Section.Address + Offset; 891 switch (Type) { 892 default: 893 llvm_unreachable("Relocation type not implemented yet!"); 894 break; 895 case ELF::R_PPC64_ADDR16: 896 writeInt16BE(LocalAddress, applyPPClo(Value + Addend)); 897 break; 898 case ELF::R_PPC64_ADDR16_DS: 899 writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3); 900 break; 901 case ELF::R_PPC64_ADDR16_LO: 902 writeInt16BE(LocalAddress, applyPPClo(Value + Addend)); 903 break; 904 case ELF::R_PPC64_ADDR16_LO_DS: 905 writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3); 906 break; 907 case ELF::R_PPC64_ADDR16_HI: 908 writeInt16BE(LocalAddress, applyPPChi(Value + Addend)); 909 break; 910 case ELF::R_PPC64_ADDR16_HA: 911 writeInt16BE(LocalAddress, applyPPCha(Value + Addend)); 912 break; 913 case ELF::R_PPC64_ADDR16_HIGHER: 914 writeInt16BE(LocalAddress, applyPPChigher(Value + Addend)); 915 break; 916 case ELF::R_PPC64_ADDR16_HIGHERA: 917 writeInt16BE(LocalAddress, applyPPChighera(Value + Addend)); 918 break; 919 case ELF::R_PPC64_ADDR16_HIGHEST: 920 writeInt16BE(LocalAddress, applyPPChighest(Value + Addend)); 921 break; 922 case ELF::R_PPC64_ADDR16_HIGHESTA: 923 writeInt16BE(LocalAddress, applyPPChighesta(Value + Addend)); 924 break; 925 case ELF::R_PPC64_ADDR14: { 926 assert(((Value + Addend) & 3) == 0); 927 // Preserve the AA/LK bits in the branch instruction 928 uint8_t aalk = *(LocalAddress + 3); 929 writeInt16BE(LocalAddress + 2, (aalk & 3) | ((Value + Addend) & 0xfffc)); 930 } break; 931 case ELF::R_PPC64_REL16_LO: { 932 uint64_t FinalAddress = (Section.LoadAddress + Offset); 933 uint64_t Delta = Value - FinalAddress + Addend; 934 writeInt16BE(LocalAddress, applyPPClo(Delta)); 935 } break; 936 case ELF::R_PPC64_REL16_HI: { 937 uint64_t FinalAddress = (Section.LoadAddress + Offset); 938 uint64_t Delta = Value - FinalAddress + Addend; 939 writeInt16BE(LocalAddress, applyPPChi(Delta)); 940 } break; 941 case ELF::R_PPC64_REL16_HA: { 942 uint64_t FinalAddress = 
(Section.LoadAddress + Offset); 943 uint64_t Delta = Value - FinalAddress + Addend; 944 writeInt16BE(LocalAddress, applyPPCha(Delta)); 945 } break; 946 case ELF::R_PPC64_ADDR32: { 947 int32_t Result = static_cast<int32_t>(Value + Addend); 948 if (SignExtend32<32>(Result) != Result) 949 llvm_unreachable("Relocation R_PPC64_ADDR32 overflow"); 950 writeInt32BE(LocalAddress, Result); 951 } break; 952 case ELF::R_PPC64_REL24: { 953 uint64_t FinalAddress = (Section.LoadAddress + Offset); 954 int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend); 955 if (SignExtend32<24>(delta) != delta) 956 llvm_unreachable("Relocation R_PPC64_REL24 overflow"); 957 // Generates a 'bl <address>' instruction 958 writeInt32BE(LocalAddress, 0x48000001 | (delta & 0x03FFFFFC)); 959 } break; 960 case ELF::R_PPC64_REL32: { 961 uint64_t FinalAddress = (Section.LoadAddress + Offset); 962 int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend); 963 if (SignExtend32<32>(delta) != delta) 964 llvm_unreachable("Relocation R_PPC64_REL32 overflow"); 965 writeInt32BE(LocalAddress, delta); 966 } break; 967 case ELF::R_PPC64_REL64: { 968 uint64_t FinalAddress = (Section.LoadAddress + Offset); 969 uint64_t Delta = Value - FinalAddress + Addend; 970 writeInt64BE(LocalAddress, Delta); 971 } break; 972 case ELF::R_PPC64_ADDR64: 973 writeInt64BE(LocalAddress, Value + Addend); 974 break; 975 } 976 } 977 978 void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section, 979 uint64_t Offset, uint64_t Value, 980 uint32_t Type, int64_t Addend) { 981 uint8_t *LocalAddress = Section.Address + Offset; 982 switch (Type) { 983 default: 984 llvm_unreachable("Relocation type not implemented yet!"); 985 break; 986 case ELF::R_390_PC16DBL: 987 case ELF::R_390_PLT16DBL: { 988 int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset); 989 assert(int16_t(Delta / 2) * 2 == Delta && "R_390_PC16DBL overflow"); 990 writeInt16BE(LocalAddress, Delta / 2); 991 break; 992 } 993 case ELF::R_390_PC32DBL: 994 case ELF::R_390_PLT32DBL: { 995 int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset); 996 assert(int32_t(Delta / 2) * 2 == Delta && "R_390_PC32DBL overflow"); 997 writeInt32BE(LocalAddress, Delta / 2); 998 break; 999 } 1000 case ELF::R_390_PC32: { 1001 int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset); 1002 assert(int32_t(Delta) == Delta && "R_390_PC32 overflow"); 1003 writeInt32BE(LocalAddress, Delta); 1004 break; 1005 } 1006 case ELF::R_390_64: 1007 writeInt64BE(LocalAddress, Value + Addend); 1008 break; 1009 } 1010 } 1011 1012 // The target location for the relocation is described by RE.SectionID and 1013 // RE.Offset. RE.SectionID can be used to find the SectionEntry. Each 1014 // SectionEntry has three members describing its location. 1015 // SectionEntry::Address is the address at which the section has been loaded 1016 // into memory in the current (host) process. SectionEntry::LoadAddress is the 1017 // address that the section will have in the target process. 1018 // SectionEntry::ObjAddress is the address of the bits for this section in the 1019 // original emitted object image (also in the current address space). 1020 // 1021 // Relocations will be applied as if the section were loaded at 1022 // SectionEntry::LoadAddress, but they will be applied at an address based 1023 // on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer to 1024 // Target memory contents if they are required for value calculations. 
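// For example, for an R_X86_64_PC32 relocation the PC-relative delta is
// computed against SectionEntry::LoadAddress + Offset (the target-process
// address), while the resulting 32-bit value is written through
// SectionEntry::Address + Offset (the host address of the loaded section).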
1025 // 1026 // The Value parameter here is the load address of the symbol for the 1027 // relocation to be applied. For relocations which refer to symbols in the 1028 // current object Value will be the LoadAddress of the section in which 1029 // the symbol resides (RE.Addend provides additional information about the 1030 // symbol location). For external symbols, Value will be the address of the 1031 // symbol in the target address space. 1032 void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE, 1033 uint64_t Value) { 1034 const SectionEntry &Section = Sections[RE.SectionID]; 1035 return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend, 1036 RE.SymOffset, RE.SectionID); 1037 } 1038 1039 void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section, 1040 uint64_t Offset, uint64_t Value, 1041 uint32_t Type, int64_t Addend, 1042 uint64_t SymOffset, SID SectionID) { 1043 switch (Arch) { 1044 case Triple::x86_64: 1045 resolveX86_64Relocation(Section, Offset, Value, Type, Addend, SymOffset); 1046 break; 1047 case Triple::x86: 1048 resolveX86Relocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type, 1049 (uint32_t)(Addend & 0xffffffffL)); 1050 break; 1051 case Triple::aarch64: 1052 case Triple::aarch64_be: 1053 resolveAArch64Relocation(Section, Offset, Value, Type, Addend); 1054 break; 1055 case Triple::arm: // Fall through. 1056 case Triple::armeb: 1057 case Triple::thumb: 1058 case Triple::thumbeb: 1059 resolveARMRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type, 1060 (uint32_t)(Addend & 0xffffffffL)); 1061 break; 1062 case Triple::mips: // Fall through. 1063 case Triple::mipsel: 1064 case Triple::mips64: 1065 case Triple::mips64el: 1066 if (IsMipsO32ABI) 1067 resolveMIPSRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), 1068 Type, (uint32_t)(Addend & 0xffffffffL)); 1069 else if (IsMipsN64ABI) 1070 resolveMIPS64Relocation(Section, Offset, Value, Type, Addend, SymOffset, 1071 SectionID); 1072 else 1073 llvm_unreachable("Mips ABI not handled"); 1074 break; 1075 case Triple::ppc64: // Fall through. 1076 case Triple::ppc64le: 1077 resolvePPC64Relocation(Section, Offset, Value, Type, Addend); 1078 break; 1079 case Triple::systemz: 1080 resolveSystemZRelocation(Section, Offset, Value, Type, Addend); 1081 break; 1082 default: 1083 llvm_unreachable("Unsupported CPU type!"); 1084 } 1085 } 1086 1087 void *RuntimeDyldELF::computePlaceholderAddress(unsigned SectionID, uint64_t Offset) const { 1088 return (void*)(Sections[SectionID].ObjAddress + Offset); 1089 } 1090 1091 void RuntimeDyldELF::processSimpleRelocation(unsigned SectionID, uint64_t Offset, unsigned RelType, RelocationValueRef Value) { 1092 RelocationEntry RE(SectionID, Offset, RelType, Value.Addend, Value.Offset); 1093 if (Value.SymbolName) 1094 addRelocationForSymbol(RE, Value.SymbolName); 1095 else 1096 addRelocationForSection(RE, Value.SectionID); 1097 } 1098 1099 relocation_iterator RuntimeDyldELF::processRelocationRef( 1100 unsigned SectionID, relocation_iterator RelI, const ObjectFile &O, 1101 ObjSectionToIDMap &ObjSectionToID, StubMap &Stubs) { 1102 const auto &Obj = cast<ELFObjectFileBase>(O); 1103 uint64_t RelType = RelI->getType(); 1104 ErrorOr<int64_t> AddendOrErr = ELFRelocationRef(*RelI).getAddend(); 1105 int64_t Addend = AddendOrErr ? 
*AddendOrErr : 0;
  elf_symbol_iterator Symbol = RelI->getSymbol();

  // Obtain the symbol name which is referenced in the relocation.
  StringRef TargetName;
  if (Symbol != Obj.symbol_end()) {
    ErrorOr<StringRef> TargetNameOrErr = Symbol->getName();
    if (std::error_code EC = TargetNameOrErr.getError())
      report_fatal_error(EC.message());
    TargetName = *TargetNameOrErr;
  }
  DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
               << " TargetName: " << TargetName << "\n");
  RelocationValueRef Value;
  // Default to an unknown symbol type; it is refined below if the symbol is
  // found in the global symbol table.
  SymbolRef::Type SymType = SymbolRef::ST_Unknown;

  // Search for the symbol in the global symbol table.
  RTDyldSymbolTable::const_iterator gsi = GlobalSymbolTable.end();
  if (Symbol != Obj.symbol_end()) {
    gsi = GlobalSymbolTable.find(TargetName.data());
    SymType = Symbol->getType();
  }
  if (gsi != GlobalSymbolTable.end()) {
    const auto &SymInfo = gsi->second;
    Value.SectionID = SymInfo.getSectionID();
    Value.Offset = SymInfo.getOffset();
    Value.Addend = SymInfo.getOffset() + Addend;
  } else {
    switch (SymType) {
    case SymbolRef::ST_Debug: {
      // TODO: Currently, ELF's SymbolRef::ST_Debug corresponds to STT_SECTION.
      // That is not obvious and could change; a cleaner approach would be to
      // add a dedicated ST_Section symbol type to SymbolRef and use it here.
      section_iterator si(Obj.section_end());
      Symbol->getSection(si);
      if (si == Obj.section_end())
        llvm_unreachable("Symbol section not found, bad object file format!");
      DEBUG(dbgs() << "\t\tThis is a section symbol\n");
      bool isCode = si->isText();
      Value.SectionID = findOrEmitSection(Obj, (*si), isCode, ObjSectionToID);
      Value.Addend = Addend;
      break;
    }
    case SymbolRef::ST_Data:
    case SymbolRef::ST_Unknown: {
      Value.SymbolName = TargetName.data();
      Value.Addend = Addend;

      // Absolute relocations will have a zero symbol ID (STN_UNDEF), which
      // will manifest here as a NULL symbol name.
      // We can set this as a valid (but empty) symbol name, and rely
      // on addRelocationForSymbol to handle this.
      if (!Value.SymbolName)
        Value.SymbolName = "";
      break;
    }
    default:
      llvm_unreachable("Unresolved symbol type!");
      break;
    }
  }

  uint64_t Offset = RelI->getOffset();

  DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset
               << "\n");
  if ((Arch == Triple::aarch64 || Arch == Triple::aarch64_be) &&
      (RelType == ELF::R_AARCH64_CALL26 || RelType == ELF::R_AARCH64_JUMP26)) {
    // This is an AArch64 branch relocation; it needs a stub function.
    DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
    SectionEntry &Section = Sections[SectionID];

    // Look for an existing stub.
    StubMap::const_iterator i = Stubs.find(Value);
    if (i != Stubs.end()) {
      resolveRelocation(Section, Offset, (uint64_t)Section.Address + i->second,
                        RelType, 0);
      DEBUG(dbgs() << " Stub function found\n");
    } else {
      // Create a new stub function.
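      // The stub emitted by createStubFunction for AArch64 is expected to
      // materialize the full 64-bit target address with a movz/movk sequence
      // and then branch to it indirectly; the four relocations created below
      // (at stub offsets 0, 4, 8 and 12) patch the 16-bit immediate of each
      // instruction in that sequence.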
1186 DEBUG(dbgs() << " Create a new stub function\n"); 1187 Stubs[Value] = Section.StubOffset; 1188 uint8_t *StubTargetAddr = 1189 createStubFunction(Section.Address + Section.StubOffset); 1190 1191 RelocationEntry REmovz_g3(SectionID, StubTargetAddr - Section.Address, 1192 ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend); 1193 RelocationEntry REmovk_g2(SectionID, StubTargetAddr - Section.Address + 4, 1194 ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend); 1195 RelocationEntry REmovk_g1(SectionID, StubTargetAddr - Section.Address + 8, 1196 ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend); 1197 RelocationEntry REmovk_g0(SectionID, 1198 StubTargetAddr - Section.Address + 12, 1199 ELF::R_AARCH64_MOVW_UABS_G0_NC, Value.Addend); 1200 1201 if (Value.SymbolName) { 1202 addRelocationForSymbol(REmovz_g3, Value.SymbolName); 1203 addRelocationForSymbol(REmovk_g2, Value.SymbolName); 1204 addRelocationForSymbol(REmovk_g1, Value.SymbolName); 1205 addRelocationForSymbol(REmovk_g0, Value.SymbolName); 1206 } else { 1207 addRelocationForSection(REmovz_g3, Value.SectionID); 1208 addRelocationForSection(REmovk_g2, Value.SectionID); 1209 addRelocationForSection(REmovk_g1, Value.SectionID); 1210 addRelocationForSection(REmovk_g0, Value.SectionID); 1211 } 1212 resolveRelocation(Section, Offset, 1213 (uint64_t)Section.Address + Section.StubOffset, RelType, 1214 0); 1215 Section.StubOffset += getMaxStubSize(); 1216 } 1217 } else if (Arch == Triple::arm) { 1218 if (RelType == ELF::R_ARM_PC24 || RelType == ELF::R_ARM_CALL || 1219 RelType == ELF::R_ARM_JUMP24) { 1220 // This is an ARM branch relocation, need to use a stub function. 1221 DEBUG(dbgs() << "\t\tThis is an ARM branch relocation."); 1222 SectionEntry &Section = Sections[SectionID]; 1223 1224 // Look for an existing stub. 1225 StubMap::const_iterator i = Stubs.find(Value); 1226 if (i != Stubs.end()) { 1227 resolveRelocation(Section, Offset, (uint64_t)Section.Address + i->second, 1228 RelType, 0); 1229 DEBUG(dbgs() << " Stub function found\n"); 1230 } else { 1231 // Create a new stub function. 1232 DEBUG(dbgs() << " Create a new stub function\n"); 1233 Stubs[Value] = Section.StubOffset; 1234 uint8_t *StubTargetAddr = 1235 createStubFunction(Section.Address + Section.StubOffset); 1236 RelocationEntry RE(SectionID, StubTargetAddr - Section.Address, 1237 ELF::R_ARM_ABS32, Value.Addend); 1238 if (Value.SymbolName) 1239 addRelocationForSymbol(RE, Value.SymbolName); 1240 else 1241 addRelocationForSection(RE, Value.SectionID); 1242 1243 resolveRelocation(Section, Offset, 1244 (uint64_t)Section.Address + Section.StubOffset, RelType, 1245 0); 1246 Section.StubOffset += getMaxStubSize(); 1247 } 1248 } else { 1249 uint32_t *Placeholder = 1250 reinterpret_cast<uint32_t*>(computePlaceholderAddress(SectionID, Offset)); 1251 if (RelType == ELF::R_ARM_PREL31 || RelType == ELF::R_ARM_TARGET1 || 1252 RelType == ELF::R_ARM_ABS32) { 1253 Value.Addend += *Placeholder; 1254 } else if (RelType == ELF::R_ARM_MOVW_ABS_NC || RelType == ELF::R_ARM_MOVT_ABS) { 1255 // See ELF for ARM documentation 1256 Value.Addend += (int16_t)((*Placeholder & 0xFFF) | (((*Placeholder >> 16) & 0xF) << 12)); 1257 } 1258 processSimpleRelocation(SectionID, Offset, RelType, Value); 1259 } 1260 } else if (IsMipsO32ABI) { 1261 uint8_t *Placeholder = reinterpret_cast<uint8_t *>( 1262 computePlaceholderAddress(SectionID, Offset)); 1263 uint32_t Opcode = readBytesUnaligned(Placeholder, 4); 1264 if (RelType == ELF::R_MIPS_26) { 1265 // This is an Mips branch relocation, need to use a stub function. 
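      // The stub created below is expected to build the target address with a
      // lui/addiu pair (hence the R_MIPS_HI16/R_MIPS_LO16 relocations at stub
      // offsets 0 and 4) and then jump through the loaded register.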
1266 DEBUG(dbgs() << "\t\tThis is a Mips branch relocation."); 1267 SectionEntry &Section = Sections[SectionID]; 1268 1269 // Extract the addend from the instruction. 1270 // We shift up by two since the Value will be down shifted again 1271 // when applying the relocation. 1272 uint32_t Addend = (Opcode & 0x03ffffff) << 2; 1273 1274 Value.Addend += Addend; 1275 1276 // Look up for existing stub. 1277 StubMap::const_iterator i = Stubs.find(Value); 1278 if (i != Stubs.end()) { 1279 RelocationEntry RE(SectionID, Offset, RelType, i->second); 1280 addRelocationForSection(RE, SectionID); 1281 DEBUG(dbgs() << " Stub function found\n"); 1282 } else { 1283 // Create a new stub function. 1284 DEBUG(dbgs() << " Create a new stub function\n"); 1285 Stubs[Value] = Section.StubOffset; 1286 uint8_t *StubTargetAddr = 1287 createStubFunction(Section.Address + Section.StubOffset); 1288 1289 // Creating Hi and Lo relocations for the filled stub instructions. 1290 RelocationEntry REHi(SectionID, StubTargetAddr - Section.Address, 1291 ELF::R_MIPS_HI16, Value.Addend); 1292 RelocationEntry RELo(SectionID, StubTargetAddr - Section.Address + 4, 1293 ELF::R_MIPS_LO16, Value.Addend); 1294 1295 if (Value.SymbolName) { 1296 addRelocationForSymbol(REHi, Value.SymbolName); 1297 addRelocationForSymbol(RELo, Value.SymbolName); 1298 } 1299 else { 1300 addRelocationForSection(REHi, Value.SectionID); 1301 addRelocationForSection(RELo, Value.SectionID); 1302 } 1303 1304 RelocationEntry RE(SectionID, Offset, RelType, Section.StubOffset); 1305 addRelocationForSection(RE, SectionID); 1306 Section.StubOffset += getMaxStubSize(); 1307 } 1308 } else { 1309 // FIXME: Calculate correct addends for R_MIPS_HI16, R_MIPS_LO16, 1310 // R_MIPS_PCHI16 and R_MIPS_PCLO16 relocations. 1311 if (RelType == ELF::R_MIPS_HI16 || RelType == ELF::R_MIPS_PCHI16) 1312 Value.Addend += (Opcode & 0x0000ffff) << 16; 1313 else if (RelType == ELF::R_MIPS_LO16) 1314 Value.Addend += (Opcode & 0x0000ffff); 1315 else if (RelType == ELF::R_MIPS_32) 1316 Value.Addend += Opcode; 1317 else if (RelType == ELF::R_MIPS_PCLO16) 1318 Value.Addend += SignExtend32<16>((Opcode & 0x0000ffff)); 1319 else if (RelType == ELF::R_MIPS_PC16) 1320 Value.Addend += SignExtend32<18>((Opcode & 0x0000ffff) << 2); 1321 else if (RelType == ELF::R_MIPS_PC19_S2) 1322 Value.Addend += SignExtend32<21>((Opcode & 0x0007ffff) << 2); 1323 else if (RelType == ELF::R_MIPS_PC21_S2) 1324 Value.Addend += SignExtend32<23>((Opcode & 0x001fffff) << 2); 1325 else if (RelType == ELF::R_MIPS_PC26_S2) 1326 Value.Addend += SignExtend32<28>((Opcode & 0x03ffffff) << 2); 1327 processSimpleRelocation(SectionID, Offset, RelType, Value); 1328 } 1329 } else if (IsMipsN64ABI) { 1330 uint32_t r_type = RelType & 0xff; 1331 RelocationEntry RE(SectionID, Offset, RelType, Value.Addend); 1332 if (r_type == ELF::R_MIPS_CALL16 || r_type == ELF::R_MIPS_GOT_PAGE 1333 || r_type == ELF::R_MIPS_GOT_DISP) { 1334 StringMap<uint64_t>::iterator i = GOTSymbolOffsets.find(TargetName); 1335 if (i != GOTSymbolOffsets.end()) 1336 RE.SymOffset = i->second; 1337 else { 1338 RE.SymOffset = allocateGOTEntries(SectionID, 1); 1339 GOTSymbolOffsets[TargetName] = RE.SymOffset; 1340 } 1341 } 1342 if (Value.SymbolName) 1343 addRelocationForSymbol(RE, Value.SymbolName); 1344 else 1345 addRelocationForSection(RE, Value.SectionID); 1346 } else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) { 1347 if (RelType == ELF::R_PPC64_REL24) { 1348 // Determine ABI variant in use for this object. 
      unsigned AbiVariant;
      Obj.getPlatformFlags(AbiVariant);
      AbiVariant &= ELF::EF_PPC64_ABI;
      // A PPC branch relocation will need a stub function if the target is
      // an external symbol (SymbolRef::ST_Unknown) or if the target address
      // is not within the signed 24-bit branch range.
      SectionEntry &Section = Sections[SectionID];
      uint8_t *Target = Section.Address + Offset;
      bool RangeOverflow = false;
      if (SymType != SymbolRef::ST_Unknown) {
        if (AbiVariant != 2) {
          // In the ELFv1 ABI, a function call may point to the .opd entry,
          // so the final symbol value is calculated based on the relocation
          // values in the .opd section.
          findOPDEntrySection(Obj, ObjSectionToID, Value);
        } else {
          // In the ELFv2 ABI, a function symbol may provide a local entry
          // point, which must be used for direct calls.
          uint8_t SymOther = Symbol->getOther();
          Value.Addend += ELF::decodePPC64LocalEntryOffset(SymOther);
        }
        uint8_t *RelocTarget = Sections[Value.SectionID].Address + Value.Addend;
        int32_t delta = static_cast<int32_t>(Target - RelocTarget);
        // If it is within the 24-bit branch range, just set the branch target.
        if (SignExtend32<24>(delta) == delta) {
          RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
          if (Value.SymbolName)
            addRelocationForSymbol(RE, Value.SymbolName);
          else
            addRelocationForSection(RE, Value.SectionID);
        } else {
          RangeOverflow = true;
        }
      }
      if (SymType == SymbolRef::ST_Unknown || RangeOverflow) {
        // The target is an external symbol (SymbolRef::ST_Unknown) or lies
        // outside the 24-bit branch range, so branch through a stub.
        StubMap::const_iterator i = Stubs.find(Value);
        if (i != Stubs.end()) {
          // Symbol function stub already created, just relocate to it.
          resolveRelocation(Section, Offset,
                            (uint64_t)Section.Address + i->second, RelType, 0);
          DEBUG(dbgs() << " Stub function found\n");
        } else {
          // Create a new stub function.
          DEBUG(dbgs() << " Create a new stub function\n");
          Stubs[Value] = Section.StubOffset;
          uint8_t *StubTargetAddr =
              createStubFunction(Section.Address + Section.StubOffset,
                                 AbiVariant);
          RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
                             ELF::R_PPC64_ADDR64, Value.Addend);

          // Generate the 64-bit address load as exemplified in section
          // 4.5.1 of the PPC64 ELF ABI. Note that the relocations need to
          // apply to the low part of the instructions, so we have to update
          // the offset according to the target endianness.
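          // The stub is expected to start with a five-instruction sequence
          // that builds the 64-bit target address in a register:
          //   lis/ori for the highest and higher halfwords (stub offsets 0, 4),
          //   a 32-bit left shift (offset 8, no relocation),
          //   oris/ori for the high and low halfwords (offsets 12, 16),
          // followed by ABI-specific code that transfers control to the
          // target. On big-endian targets the 16-bit immediate field occupies
          // the second halfword of each instruction word, hence the +2
          // adjustment to StubRelocOffset below.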
          uint64_t StubRelocOffset = StubTargetAddr - Section.Address;
          if (!IsTargetLittleEndian)
            StubRelocOffset += 2;

          RelocationEntry REhst(SectionID, StubRelocOffset + 0,
                                ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
          RelocationEntry REhr(SectionID, StubRelocOffset + 4,
                               ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
          RelocationEntry REh(SectionID, StubRelocOffset + 12,
                              ELF::R_PPC64_ADDR16_HI, Value.Addend);
          RelocationEntry REl(SectionID, StubRelocOffset + 16,
                              ELF::R_PPC64_ADDR16_LO, Value.Addend);

          if (Value.SymbolName) {
            addRelocationForSymbol(REhst, Value.SymbolName);
            addRelocationForSymbol(REhr, Value.SymbolName);
            addRelocationForSymbol(REh, Value.SymbolName);
            addRelocationForSymbol(REl, Value.SymbolName);
          } else {
            addRelocationForSection(REhst, Value.SectionID);
            addRelocationForSection(REhr, Value.SectionID);
            addRelocationForSection(REh, Value.SectionID);
            addRelocationForSection(REl, Value.SectionID);
          }

          resolveRelocation(Section, Offset,
                            (uint64_t)Section.Address + Section.StubOffset,
                            RelType, 0);
          Section.StubOffset += getMaxStubSize();
        }
        if (SymType == SymbolRef::ST_Unknown) {
          // Restore the TOC for external calls.
          if (AbiVariant == 2)
            writeInt32BE(Target + 4, 0xE8410018); // ld r2,24(r1)
          else
            writeInt32BE(Target + 4, 0xE8410028); // ld r2,40(r1)
        }
      }
    } else if (RelType == ELF::R_PPC64_TOC16 ||
               RelType == ELF::R_PPC64_TOC16_DS ||
               RelType == ELF::R_PPC64_TOC16_LO ||
               RelType == ELF::R_PPC64_TOC16_LO_DS ||
               RelType == ELF::R_PPC64_TOC16_HI ||
               RelType == ELF::R_PPC64_TOC16_HA) {
      // These relocations are supposed to subtract the TOC address from
      // the final value. This does not fit cleanly into the RuntimeDyld
      // scheme, since there may be *two* sections involved in determining
      // the relocation value (the section of the symbol referred to by the
      // relocation, and the TOC section associated with the current module).
      //
      // Fortunately, these relocations are currently only ever generated
      // referring to symbols that themselves reside in the TOC, which means
      // that the two sections are actually the same. Thus they cancel out
      // and we can immediately resolve the relocation right now.
      switch (RelType) {
      case ELF::R_PPC64_TOC16: RelType = ELF::R_PPC64_ADDR16; break;
      case ELF::R_PPC64_TOC16_DS: RelType = ELF::R_PPC64_ADDR16_DS; break;
      case ELF::R_PPC64_TOC16_LO: RelType = ELF::R_PPC64_ADDR16_LO; break;
      case ELF::R_PPC64_TOC16_LO_DS: RelType = ELF::R_PPC64_ADDR16_LO_DS; break;
      case ELF::R_PPC64_TOC16_HI: RelType = ELF::R_PPC64_ADDR16_HI; break;
      case ELF::R_PPC64_TOC16_HA: RelType = ELF::R_PPC64_ADDR16_HA; break;
      default: llvm_unreachable("Wrong relocation type.");
      }

      RelocationValueRef TOCValue;
      findPPC64TOCSection(Obj, ObjSectionToID, TOCValue);
      if (Value.SymbolName || Value.SectionID != TOCValue.SectionID)
        llvm_unreachable("Unsupported TOC relocation.");
      Value.Addend -= TOCValue.Addend;
      resolveRelocation(Sections[SectionID], Offset, Value.Addend, RelType, 0);
    } else {
      // There are two ways to refer to the TOC address directly: either
      // via an ELF::R_PPC64_TOC relocation (where both symbol and addend are
      // ignored), or via any relocation that refers to the magic ".TOC."
      // symbol (in which case the addend is respected).
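      // In both cases findPPC64TOCSection supplies the TOC base (the start of
      // the first of the .got/.toc/.tocbss/.plt sections, biased by 0x8000),
      // after which the relocation can be handled like a plain address
      // relocation against that section.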
1481 if (RelType == ELF::R_PPC64_TOC) { 1482 RelType = ELF::R_PPC64_ADDR64; 1483 findPPC64TOCSection(Obj, ObjSectionToID, Value); 1484 } else if (TargetName == ".TOC.") { 1485 findPPC64TOCSection(Obj, ObjSectionToID, Value); 1486 Value.Addend += Addend; 1487 } 1488 1489 RelocationEntry RE(SectionID, Offset, RelType, Value.Addend); 1490 1491 if (Value.SymbolName) 1492 addRelocationForSymbol(RE, Value.SymbolName); 1493 else 1494 addRelocationForSection(RE, Value.SectionID); 1495 } 1496 } else if (Arch == Triple::systemz && 1497 (RelType == ELF::R_390_PLT32DBL || RelType == ELF::R_390_GOTENT)) { 1498 // Create function stubs for both PLT and GOT references, regardless of 1499 // whether the GOT reference is to data or code. The stub contains the 1500 // full address of the symbol, as needed by GOT references, and the 1501 // executable part only adds an overhead of 8 bytes. 1502 // 1503 // We could try to conserve space by allocating the code and data 1504 // parts of the stub separately. However, as things stand, we allocate 1505 // a stub for every relocation, so using a GOT in JIT code should be 1506 // no less space efficient than using an explicit constant pool. 1507 DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation."); 1508 SectionEntry &Section = Sections[SectionID]; 1509 1510 // Look for an existing stub. 1511 StubMap::const_iterator i = Stubs.find(Value); 1512 uintptr_t StubAddress; 1513 if (i != Stubs.end()) { 1514 StubAddress = uintptr_t(Section.Address) + i->second; 1515 DEBUG(dbgs() << " Stub function found\n"); 1516 } else { 1517 // Create a new stub function. 1518 DEBUG(dbgs() << " Create a new stub function\n"); 1519 1520 uintptr_t BaseAddress = uintptr_t(Section.Address); 1521 uintptr_t StubAlignment = getStubAlignment(); 1522 StubAddress = (BaseAddress + Section.StubOffset + StubAlignment - 1) & 1523 -StubAlignment; 1524 unsigned StubOffset = StubAddress - BaseAddress; 1525 1526 Stubs[Value] = StubOffset; 1527 createStubFunction((uint8_t *)StubAddress); 1528 RelocationEntry RE(SectionID, StubOffset + 8, ELF::R_390_64, 1529 Value.Offset); 1530 if (Value.SymbolName) 1531 addRelocationForSymbol(RE, Value.SymbolName); 1532 else 1533 addRelocationForSection(RE, Value.SectionID); 1534 Section.StubOffset = StubOffset + getMaxStubSize(); 1535 } 1536 1537 if (RelType == ELF::R_390_GOTENT) 1538 resolveRelocation(Section, Offset, StubAddress + 8, ELF::R_390_PC32DBL, 1539 Addend); 1540 else 1541 resolveRelocation(Section, Offset, StubAddress, RelType, Addend); 1542 } else if (Arch == Triple::x86_64) { 1543 if (RelType == ELF::R_X86_64_PLT32) { 1544 // The way the PLT relocations normally work is that the linker allocates 1545 // the 1546 // PLT and this relocation makes a PC-relative call into the PLT. The PLT 1547 // entry will then jump to an address provided by the GOT. On first call, 1548 // the 1549 // GOT address will point back into PLT code that resolves the symbol. After 1550 // the first call, the GOT entry points to the actual function. 1551 // 1552 // For local functions we're ignoring all of that here and just replacing 1553 // the PLT32 relocation type with PC32, which will translate the relocation 1554 // into a PC-relative call directly to the function. For external symbols we 1555 // can't be sure the function will be within 2^32 bytes of the call site, so 1556 // we need to create a stub, which calls into the GOT. 
      if (Value.SymbolName) {
        // This is a call to an external function.
        // Look for an existing stub.
        SectionEntry &Section = Sections[SectionID];
        StubMap::const_iterator i = Stubs.find(Value);
        uintptr_t StubAddress;
        if (i != Stubs.end()) {
          StubAddress = uintptr_t(Section.Address) + i->second;
          DEBUG(dbgs() << " Stub function found\n");
        } else {
          // Create a new stub function (equivalent to a PLT entry).
          DEBUG(dbgs() << " Create a new stub function\n");

          uintptr_t BaseAddress = uintptr_t(Section.Address);
          uintptr_t StubAlignment = getStubAlignment();
          StubAddress = (BaseAddress + Section.StubOffset + StubAlignment - 1) &
                        -StubAlignment;
          unsigned StubOffset = StubAddress - BaseAddress;
          Stubs[Value] = StubOffset;
          createStubFunction((uint8_t *)StubAddress);

          // Bump our stub offset counter.
          Section.StubOffset = StubOffset + getMaxStubSize();

          // Allocate a GOT entry.
          uint64_t GOTOffset = allocateGOTEntries(SectionID, 1);

          // The load of the GOT address has an addend of -4.
          resolveGOTOffsetRelocation(SectionID, StubOffset + 2, GOTOffset - 4);

          // Fill in the value of the symbol we're targeting into the GOT.
          addRelocationForSymbol(
              computeGOTOffsetRE(SectionID, GOTOffset, 0, ELF::R_X86_64_64),
              Value.SymbolName);
        }

        // Make the target call a call into the stub table.
        resolveRelocation(Section, Offset, StubAddress, ELF::R_X86_64_PC32,
                          Addend);
      } else {
        RelocationEntry RE(SectionID, Offset, ELF::R_X86_64_PC32, Value.Addend,
                           Value.Offset);
        addRelocationForSection(RE, Value.SectionID);
      }
    } else if (RelType == ELF::R_X86_64_GOTPCREL) {
      uint64_t GOTOffset = allocateGOTEntries(SectionID, 1);
      resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend);

      // Fill in the value of the symbol we're targeting into the GOT.
      RelocationEntry RE =
          computeGOTOffsetRE(SectionID, GOTOffset, Value.Offset,
                             ELF::R_X86_64_64);
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    } else if (RelType == ELF::R_X86_64_PC32) {
      Value.Addend +=
          support::ulittle32_t::ref(computePlaceholderAddress(SectionID, Offset));
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    } else if (RelType == ELF::R_X86_64_PC64) {
      Value.Addend +=
          support::ulittle64_t::ref(computePlaceholderAddress(SectionID, Offset));
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    } else {
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    }
  } else {
    if (Arch == Triple::x86) {
      Value.Addend +=
          support::ulittle32_t::ref(computePlaceholderAddress(SectionID, Offset));
    }
    processSimpleRelocation(SectionID, Offset, RelType, Value);
  }
  return ++RelI;
}

size_t RuntimeDyldELF::getGOTEntrySize() {
  // We don't use the GOT in all of these cases, but it's essentially free
  // to put them all here.
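  // A GOT entry simply holds one pointer to the target symbol, so its size
  // follows the pointer width of the target ABI; MIPS is special-cased below
  // because the O32 and N64 ABIs use different pointer sizes.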
  size_t Result = 0;
  switch (Arch) {
  case Triple::x86_64:
  case Triple::aarch64:
  case Triple::aarch64_be:
  case Triple::ppc64:
  case Triple::ppc64le:
  case Triple::systemz:
    Result = sizeof(uint64_t);
    break;
  case Triple::x86:
  case Triple::arm:
  case Triple::thumb:
    Result = sizeof(uint32_t);
    break;
  case Triple::mips:
  case Triple::mipsel:
  case Triple::mips64:
  case Triple::mips64el:
    if (IsMipsO32ABI)
      Result = sizeof(uint32_t);
    else if (IsMipsN64ABI)
      Result = sizeof(uint64_t);
    else
      llvm_unreachable("Mips ABI not handled");
    break;
  default:
    llvm_unreachable("Unsupported CPU type!");
  }
  return Result;
}

uint64_t RuntimeDyldELF::allocateGOTEntries(unsigned SectionID, unsigned no) {
  (void)SectionID; // The GOT section is the same for all sections in the
                   // object file.
  if (GOTSectionID == 0) {
    GOTSectionID = Sections.size();
    // Reserve a section id. We'll allocate the section later
    // once we know the total size.
    Sections.push_back(SectionEntry(".got", 0, 0, 0));
  }
  uint64_t StartOffset = CurrentGOTIndex * getGOTEntrySize();
  CurrentGOTIndex += no;
  return StartOffset;
}

void RuntimeDyldELF::resolveGOTOffsetRelocation(unsigned SectionID,
                                                uint64_t Offset,
                                                uint64_t GOTOffset) {
  // Fill in the relative address of the GOT entry into the stub.
  RelocationEntry GOTRE(SectionID, Offset, ELF::R_X86_64_PC32, GOTOffset);
  addRelocationForSection(GOTRE, GOTSectionID);
}

RelocationEntry RuntimeDyldELF::computeGOTOffsetRE(unsigned SectionID,
                                                   uint64_t GOTOffset,
                                                   uint64_t SymbolOffset,
                                                   uint32_t Type) {
  (void)SectionID; // The GOT section is the same for all sections in the
                   // object file.
  return RelocationEntry(GOTSectionID, GOTOffset, Type, SymbolOffset);
}

void RuntimeDyldELF::finalizeLoad(const ObjectFile &Obj,
                                  ObjSectionToIDMap &SectionMap) {
  // If necessary, allocate the global offset table.
  if (GOTSectionID != 0) {
    // Allocate memory for the section.
    size_t TotalSize = CurrentGOTIndex * getGOTEntrySize();
    uint8_t *Addr = MemMgr.allocateDataSection(TotalSize, getGOTEntrySize(),
                                               GOTSectionID, ".got", false);
    if (!Addr)
      report_fatal_error("Unable to allocate memory for GOT!");

    Sections[GOTSectionID] = SectionEntry(".got", Addr, TotalSize, 0);

    if (Checker)
      Checker->registerSection(Obj.getFileName(), GOTSectionID);

    // For now, initialize all GOT entries to zero. We'll fill them in as
    // needed when GOT-based relocations are applied.
    memset(Addr, 0, TotalSize);
    if (IsMipsN64ABI) {
      // To correctly resolve Mips GOT relocations, we need a mapping from
      // the object's sections to GOTs.
      for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
           SI != SE; ++SI) {
        if (SI->relocation_begin() != SI->relocation_end()) {
          section_iterator RelocatedSection = SI->getRelocatedSection();
          ObjSectionToIDMap::iterator i = SectionMap.find(*RelocatedSection);
          assert(i != SectionMap.end());
          SectionToGOTMap[i->second] = GOTSectionID;
        }
      }
      GOTSymbolOffsets.clear();
    }
  }

  // Look for and record the EH frame section.
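  // Only the first section named ".eh_frame" is recorded here; the actual
  // registration with the memory manager happens later, in registerEHFrames().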
  ObjSectionToIDMap::iterator i, e;
  for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
    const SectionRef &Section = i->first;
    StringRef Name;
    Section.getName(Name);
    if (Name == ".eh_frame") {
      UnregisteredEHFrameSections.push_back(i->second);
      break;
    }
  }

  GOTSectionID = 0;
  CurrentGOTIndex = 0;
}

bool RuntimeDyldELF::isCompatibleFile(const object::ObjectFile &Obj) const {
  return Obj.isELF();
}

} // namespace llvm