//===-- RuntimeDyldELF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementation of ELF support for the MC-JIT runtime dynamic linker.
//
//===----------------------------------------------------------------------===//

#include "RuntimeDyldELF.h"
#include "RuntimeDyldCheckerImpl.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;
using namespace llvm::object;

#define DEBUG_TYPE "dyld"

static inline std::error_code check(std::error_code Err) {
  if (Err) {
    report_fatal_error(Err.message());
  }
  return Err;
}

namespace {

template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> {
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)

  typedef Elf_Shdr_Impl<ELFT> Elf_Shdr;
  typedef Elf_Sym_Impl<ELFT> Elf_Sym;
  typedef Elf_Rel_Impl<ELFT, false> Elf_Rel;
  typedef Elf_Rel_Impl<ELFT, true> Elf_Rela;

  typedef Elf_Ehdr_Impl<ELFT> Elf_Ehdr;

  typedef typename ELFDataTypeTypedefHelper<ELFT>::value_type addr_type;

public:
  DyldELFObject(MemoryBufferRef Wrapper, std::error_code &ec);

  void updateSectionAddress(const SectionRef &Sec, uint64_t Addr);

  void updateSymbolAddress(const SymbolRef &SymRef, uint64_t Addr);

  // Methods for type inquiry through isa, cast and dyn_cast
  static inline bool classof(const Binary *v) {
    return (isa<ELFObjectFile<ELFT>>(v) &&
            classof(cast<ELFObjectFile<ELFT>>(v)));
  }
  static inline bool classof(const ELFObjectFile<ELFT> *v) {
    return v->isDyldType();
  }
};

// The MemoryBuffer passed into this constructor is just a wrapper around the
// actual memory. Ultimately, the Binary parent class will take ownership of
// this MemoryBuffer object but not the underlying memory.
template <class ELFT>
DyldELFObject<ELFT>::DyldELFObject(MemoryBufferRef Wrapper, std::error_code &EC)
    : ELFObjectFile<ELFT>(Wrapper, EC) {
  this->isDyldELFObject = true;
}

template <class ELFT>
void DyldELFObject<ELFT>::updateSectionAddress(const SectionRef &Sec,
                                               uint64_t Addr) {
  DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
  Elf_Shdr *shdr =
      const_cast<Elf_Shdr *>(reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));

  // This assumes the address passed in matches the target address bitness
  // The template-based type cast handles everything else.
  shdr->sh_addr = static_cast<addr_type>(Addr);
}

template <class ELFT>
void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef,
                                              uint64_t Addr) {

  Elf_Sym *sym = const_cast<Elf_Sym *>(
      ELFObjectFile<ELFT>::getSymbol(SymRef.getRawDataRefImpl()));

  // This assumes the address passed in matches the target address bitness
  // The template-based type cast handles everything else.
  sym->st_value = static_cast<addr_type>(Addr);
}

class LoadedELFObjectInfo
    : public RuntimeDyld::LoadedObjectInfoHelper<LoadedELFObjectInfo> {
public:
  LoadedELFObjectInfo(RuntimeDyldImpl &RTDyld, unsigned BeginIdx,
                      unsigned EndIdx)
      : LoadedObjectInfoHelper(RTDyld, BeginIdx, EndIdx) {}

  OwningBinary<ObjectFile>
  getObjectForDebug(const ObjectFile &Obj) const override;
};

template <typename ELFT>
std::unique_ptr<DyldELFObject<ELFT>>
createRTDyldELFObject(MemoryBufferRef Buffer,
                      const LoadedELFObjectInfo &L,
                      std::error_code &ec) {
  typedef typename ELFFile<ELFT>::Elf_Shdr Elf_Shdr;
  typedef typename ELFDataTypeTypedefHelper<ELFT>::value_type addr_type;

  std::unique_ptr<DyldELFObject<ELFT>> Obj =
      llvm::make_unique<DyldELFObject<ELFT>>(Buffer, ec);

  // Iterate over all sections in the object.
  for (const auto &Sec : Obj->sections()) {
    StringRef SectionName;
    Sec.getName(SectionName);
    if (SectionName != "") {
      DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
      Elf_Shdr *shdr = const_cast<Elf_Shdr *>(
          reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));

      if (uint64_t SecLoadAddr = L.getSectionLoadAddress(SectionName)) {
        // This assumes that the address passed in matches the target address
        // bitness. The template-based type cast handles everything else.
        shdr->sh_addr = static_cast<addr_type>(SecLoadAddr);
      }
    }
  }

  return Obj;
}

OwningBinary<ObjectFile> createELFDebugObject(const ObjectFile &Obj,
                                              const LoadedELFObjectInfo &L) {
  assert(Obj.isELF() && "Not an ELF object file.");

  std::unique_ptr<MemoryBuffer> Buffer =
      MemoryBuffer::getMemBufferCopy(Obj.getData(), Obj.getFileName());

  std::error_code ec;

  std::unique_ptr<ObjectFile> DebugObj;
  if (Obj.getBytesInAddress() == 4 && Obj.isLittleEndian()) {
    typedef ELFType<support::little, false> ELF32LE;
    DebugObj = createRTDyldELFObject<ELF32LE>(Buffer->getMemBufferRef(), L, ec);
  } else if (Obj.getBytesInAddress() == 4 && !Obj.isLittleEndian()) {
    typedef ELFType<support::big, false> ELF32BE;
    DebugObj = createRTDyldELFObject<ELF32BE>(Buffer->getMemBufferRef(), L, ec);
  } else if (Obj.getBytesInAddress() == 8 && !Obj.isLittleEndian()) {
    typedef ELFType<support::big, true> ELF64BE;
    DebugObj = createRTDyldELFObject<ELF64BE>(Buffer->getMemBufferRef(), L, ec);
  } else if (Obj.getBytesInAddress() == 8 && Obj.isLittleEndian()) {
    typedef ELFType<support::little, true> ELF64LE;
    DebugObj = createRTDyldELFObject<ELF64LE>(Buffer->getMemBufferRef(), L, ec);
  } else
    llvm_unreachable("Unexpected ELF format");

  assert(!ec && "Could not construct copy ELF object file");

  return OwningBinary<ObjectFile>(std::move(DebugObj), std::move(Buffer));
}

OwningBinary<ObjectFile>
LoadedELFObjectInfo::getObjectForDebug(const ObjectFile &Obj) const {
  return createELFDebugObject(Obj, *this);
}

} // namespace

namespace llvm {

RuntimeDyldELF::RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr,
                               RuntimeDyld::SymbolResolver &Resolver)
    : RuntimeDyldImpl(MemMgr, Resolver), GOTSectionID(0), CurrentGOTIndex(0) {}
RuntimeDyldELF::~RuntimeDyldELF() {}

void RuntimeDyldELF::registerEHFrames() {
  for (int i = 0, e = UnregisteredEHFrameSections.size(); i != e; ++i) {
    SID EHFrameSID = UnregisteredEHFrameSections[i];
    uint8_t *EHFrameAddr = Sections[EHFrameSID].Address;
    uint64_t EHFrameLoadAddr = Sections[EHFrameSID].LoadAddress;
    size_t EHFrameSize = Sections[EHFrameSID].Size;
    MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
    RegisteredEHFrameSections.push_back(EHFrameSID);
  }
  UnregisteredEHFrameSections.clear();
}

void RuntimeDyldELF::deregisterEHFrames() {
  for (int i = 0, e = RegisteredEHFrameSections.size(); i != e; ++i) {
    SID EHFrameSID = RegisteredEHFrameSections[i];
    uint8_t *EHFrameAddr = Sections[EHFrameSID].Address;
    uint64_t EHFrameLoadAddr = Sections[EHFrameSID].LoadAddress;
    size_t EHFrameSize = Sections[EHFrameSID].Size;
    MemMgr.deregisterEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
  }
  RegisteredEHFrameSections.clear();
}

std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
RuntimeDyldELF::loadObject(const object::ObjectFile &O) {
  unsigned SectionStartIdx, SectionEndIdx;
  std::tie(SectionStartIdx, SectionEndIdx) = loadObjectImpl(O);
  return llvm::make_unique<LoadedELFObjectInfo>(*this, SectionStartIdx,
                                                SectionEndIdx);
}

void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
                                             uint64_t Offset, uint64_t Value,
                                             uint32_t Type, int64_t Addend,
                                             uint64_t SymOffset) {
  switch (Type) {
  default:
    llvm_unreachable("Relocation type not implemented yet!");
    break;
  case ELF::R_X86_64_64: {
    support::ulittle64_t::ref(Section.Address + Offset) = Value + Addend;
    DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
                 << format("%p\n", Section.Address + Offset));
    break;
  }
  case ELF::R_X86_64_32:
  case ELF::R_X86_64_32S: {
    Value += Addend;
    assert((Type == ELF::R_X86_64_32 && (Value <= UINT32_MAX)) ||
           (Type == ELF::R_X86_64_32S &&
            ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
    uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
    support::ulittle32_t::ref(Section.Address + Offset) = TruncatedAddr;
    DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
                 << format("%p\n", Section.Address + Offset));
    break;
  }
  case ELF::R_X86_64_PC32: {
    uint64_t FinalAddress = Section.LoadAddress + Offset;
    int64_t RealOffset = Value + Addend - FinalAddress;
    assert(isInt<32>(RealOffset));
    int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
    support::ulittle32_t::ref(Section.Address + Offset) = TruncOffset;
    break;
  }
  case ELF::R_X86_64_PC64: {
    uint64_t FinalAddress = Section.LoadAddress + Offset;
    int64_t RealOffset = Value + Addend - FinalAddress;
    support::ulittle64_t::ref(Section.Address + Offset) = RealOffset;
    break;
  }
  }
}

void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
                                          uint64_t Offset, uint32_t Value,
                                          uint32_t Type, int32_t Addend) {
  switch (Type) {
  case ELF::R_386_32: {
    support::ulittle32_t::ref(Section.Address + Offset) = Value + Addend;
    break;
  }
  case ELF::R_386_PC32: {
    uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
    uint32_t RealOffset = Value + Addend - FinalAddress;
    support::ulittle32_t::ref(Section.Address + Offset) = RealOffset;
    break;
  }
  default:
    // There are other relocation types, but it appears these are the
    // only ones currently used by the LLVM ELF object writer
    llvm_unreachable("Relocation type not implemented yet!");
    break;
  }
}

void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
                                              uint64_t Offset, uint64_t Value,
                                              uint32_t Type, int64_t Addend) {
  uint32_t *TargetPtr = reinterpret_cast<uint32_t *>(Section.Address + Offset);
  uint64_t FinalAddress = Section.LoadAddress + Offset;

  DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
               << format("%llx", Section.Address + Offset)
               << " FinalAddress: 0x" << format("%llx", FinalAddress)
               << " Value: 0x" << format("%llx", Value) << " Type: 0x"
               << format("%x", Type) << " Addend: 0x" << format("%llx", Addend)
               << "\n");

  switch (Type) {
  default:
    llvm_unreachable("Relocation type not implemented yet!");
    break;
  case ELF::R_AARCH64_ABS64: {
    uint64_t *TargetPtr =
        reinterpret_cast<uint64_t *>(Section.Address + Offset);
    *TargetPtr = Value + Addend;
    break;
  }
  case ELF::R_AARCH64_PREL32: {
    uint64_t Result = Value + Addend - FinalAddress;
    assert(static_cast<int64_t>(Result) >= INT32_MIN &&
           static_cast<int64_t>(Result) <= UINT32_MAX);
    *TargetPtr = static_cast<uint32_t>(Result & 0xffffffffU);
    break;
  }
  case ELF::R_AARCH64_CALL26: // fallthrough
  case ELF::R_AARCH64_JUMP26: {
    // Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the
    // calculation.
    uint64_t BranchImm = Value + Addend - FinalAddress;

    // "Check that -2^27 <= result < 2^27".
    assert(isInt<28>(BranchImm));

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xfc000000U;
    // Immediate goes in bits 25:0 of B and BL.
    *TargetPtr |= static_cast<uint32_t>(BranchImm & 0xffffffcU) >> 2;
    break;
  }
  case ELF::R_AARCH64_MOVW_UABS_G3: {
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffe0001fU;
    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
    *TargetPtr |= Result >> (48 - 5);
    // Shift must be "lsl #48", in bits 22:21
    assert((*TargetPtr >> 21 & 0x3) == 3 && "invalid shift for relocation");
    break;
  }
  case ELF::R_AARCH64_MOVW_UABS_G2_NC: {
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffe0001fU;
    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
    *TargetPtr |= ((Result & 0xffff00000000ULL) >> (32 - 5));
    // Shift must be "lsl #32", in bits 22:21
    assert((*TargetPtr >> 21 & 0x3) == 2 && "invalid shift for relocation");
    break;
  }
  case ELF::R_AARCH64_MOVW_UABS_G1_NC: {
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffe0001fU;
    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
    *TargetPtr |= ((Result & 0xffff0000U) >> (16 - 5));
    // Shift must be "lsl #16", in bits 22:21
    assert((*TargetPtr >> 21 & 0x3) == 1 && "invalid shift for relocation");
    break;
  }
  case ELF::R_AARCH64_MOVW_UABS_G0_NC: {
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffe0001fU;
    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
    *TargetPtr |= ((Result & 0xffffU) << 5);
    // Shift must be "lsl #0", in bits 22:21.
    assert((*TargetPtr >> 21 & 0x3) == 0 && "invalid shift for relocation");
    break;
  }
  case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
    // Operation: Page(S+A) - Page(P)
    uint64_t Result =
        ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);

    // Check that -2^32 <= X < 2^32
    assert(isInt<33>(Result) && "overflow check failed for relocation");

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0x9f00001fU;
    // Immediate goes in bits 30:29 + 5:23 of ADRP instruction, taken
    // from bits 32:12 of X.
    *TargetPtr |= ((Result & 0x3000U) << (29 - 12));
    *TargetPtr |= ((Result & 0x1ffffc000ULL) >> (14 - 5));
    break;
  }
  case ELF::R_AARCH64_LDST32_ABS_LO12_NC: {
    // Operation: S + A
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffc003ffU;
    // Immediate goes in bits 21:10 of LD/ST instruction, taken
    // from bits 11:2 of X
    *TargetPtr |= ((Result & 0xffc) << (10 - 2));
    break;
  }
  case ELF::R_AARCH64_LDST64_ABS_LO12_NC: {
    // Operation: S + A
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffc003ffU;
    // Immediate goes in bits 21:10 of LD/ST instruction, taken
    // from bits 11:3 of X
    *TargetPtr |= ((Result & 0xff8) << (10 - 3));
    break;
  }
  }
}

void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
                                          uint64_t Offset, uint32_t Value,
                                          uint32_t Type, int32_t Addend) {
  // TODO: Add Thumb relocations.
  uint32_t *TargetPtr = (uint32_t *)(Section.Address + Offset);
  uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
  Value += Addend;

  DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: "
               << Section.Address + Offset
               << " FinalAddress: " << format("%p", FinalAddress) << " Value: "
               << format("%x", Value) << " Type: " << format("%x", Type)
               << " Addend: " << format("%x", Addend) << "\n");

  switch (Type) {
  default:
    llvm_unreachable("Not implemented relocation type!");

  case ELF::R_ARM_NONE:
    break;
  case ELF::R_ARM_PREL31:
  case ELF::R_ARM_TARGET1:
  case ELF::R_ARM_ABS32:
    *TargetPtr = Value;
    break;
  // Write a 16-bit half of the 32-bit value into the mov instruction;
  // the upper 4 bits of the immediate must be shifted into bits 19:16.
  case ELF::R_ARM_MOVW_ABS_NC:
  case ELF::R_ARM_MOVT_ABS:
    if (Type == ELF::R_ARM_MOVW_ABS_NC)
      Value = Value & 0xFFFF;
    else if (Type == ELF::R_ARM_MOVT_ABS)
      Value = (Value >> 16) & 0xFFFF;
    *TargetPtr &= ~0x000F0FFF;
    *TargetPtr |= Value & 0xFFF;
    *TargetPtr |= ((Value >> 12) & 0xF) << 16;
    break;
  // Write 24 bit relative value to the branch instruction.
  case ELF::R_ARM_PC24: // Fall through.
  case ELF::R_ARM_CALL: // Fall through.
  case ELF::R_ARM_JUMP24:
    int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8);
    RelValue = (RelValue & 0x03FFFFFC) >> 2;
    assert((*TargetPtr & 0xFFFFFF) == 0xFFFFFE);
    *TargetPtr &= 0xFF000000;
    *TargetPtr |= RelValue;
    break;
  }
}

void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section,
                                           uint64_t Offset, uint32_t Value,
                                           uint32_t Type, int32_t Addend) {
  uint8_t *TargetPtr = Section.Address + Offset;
  Value += Addend;

  DEBUG(dbgs() << "resolveMIPSRelocation, LocalAddress: "
               << Section.Address + Offset << " FinalAddress: "
               << format("%p", Section.LoadAddress + Offset) << " Value: "
               << format("%x", Value) << " Type: " << format("%x", Type)
               << " Addend: " << format("%x", Addend) << "\n");

  uint32_t Insn = readBytesUnaligned(TargetPtr, 4);

  switch (Type) {
  default:
    llvm_unreachable("Not implemented relocation type!");
    break;
  case ELF::R_MIPS_32:
    writeBytesUnaligned(Value, TargetPtr, 4);
    break;
  case ELF::R_MIPS_26:
    Insn &= 0xfc000000;
    Insn |= (Value & 0x0fffffff) >> 2;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_HI16:
    // Get the higher 16-bits. Also add 1 if bit 15 is 1.
    Insn &= 0xffff0000;
    Insn |= ((Value + 0x8000) >> 16) & 0xffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_LO16:
    Insn &= 0xffff0000;
    Insn |= Value & 0xffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_PC32:
    uint32_t FinalAddress = (Section.LoadAddress + Offset);
    writeBytesUnaligned(Value + Addend - FinalAddress, (uint8_t *)TargetPtr, 4);
    break;
  }
}

void RuntimeDyldELF::setMipsABI(const ObjectFile &Obj) {
  if (Arch == Triple::UnknownArch ||
      !StringRef(Triple::getArchTypePrefix(Arch)).equals("mips")) {
    IsMipsO32ABI = false;
    IsMipsN64ABI = false;
    return;
  }
  unsigned AbiVariant;
  Obj.getPlatformFlags(AbiVariant);
  IsMipsO32ABI = AbiVariant & ELF::EF_MIPS_ABI_O32;
  IsMipsN64ABI = Obj.getFileFormatName().equals("ELF64-mips");
  if (AbiVariant & ELF::EF_MIPS_ABI2)
    llvm_unreachable("Mips N32 ABI is not supported yet");
}

void RuntimeDyldELF::resolveMIPS64Relocation(const SectionEntry &Section,
                                             uint64_t Offset, uint64_t Value,
                                             uint32_t Type, int64_t Addend,
                                             uint64_t SymOffset,
                                             SID SectionID) {
  uint32_t r_type = Type & 0xff;
  uint32_t r_type2 = (Type >> 8) & 0xff;
  uint32_t r_type3 = (Type >> 16) & 0xff;

  // RelType keeps track of which relocation type is currently being applied.
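  // A MIPS N64 r_info word packs up to three relocation types (r_type,
  // r_type2, r_type3); they are evaluated in sequence below, with each
  // intermediate result feeding the next step as its addend.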
  uint32_t RelType = r_type;
  int64_t CalculatedValue = evaluateMIPS64Relocation(Section, Offset, Value,
                                                     RelType, Addend,
                                                     SymOffset, SectionID);
  if (r_type2 != ELF::R_MIPS_NONE) {
    RelType = r_type2;
    CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
                                               CalculatedValue, SymOffset,
                                               SectionID);
  }
  if (r_type3 != ELF::R_MIPS_NONE) {
    RelType = r_type3;
    CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
                                               CalculatedValue, SymOffset,
                                               SectionID);
  }
  applyMIPS64Relocation(Section.Address + Offset, CalculatedValue, RelType);
}

int64_t
RuntimeDyldELF::evaluateMIPS64Relocation(const SectionEntry &Section,
                                         uint64_t Offset, uint64_t Value,
                                         uint32_t Type, int64_t Addend,
                                         uint64_t SymOffset, SID SectionID) {

  DEBUG(dbgs() << "evaluateMIPS64Relocation, LocalAddress: 0x"
               << format("%llx", Section.Address + Offset)
               << " FinalAddress: 0x"
               << format("%llx", Section.LoadAddress + Offset)
               << " Value: 0x" << format("%llx", Value) << " Type: 0x"
               << format("%x", Type) << " Addend: 0x" << format("%llx", Addend)
               << " SymOffset: " << format("%x", SymOffset)
               << "\n");

  switch (Type) {
  default:
    llvm_unreachable("Not implemented relocation type!");
    break;
  case ELF::R_MIPS_JALR:
  case ELF::R_MIPS_NONE:
    break;
  case ELF::R_MIPS_32:
  case ELF::R_MIPS_64:
    return Value + Addend;
  case ELF::R_MIPS_26:
    return ((Value + Addend) >> 2) & 0x3ffffff;
  case ELF::R_MIPS_GPREL16: {
    uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
    return Value + Addend - (GOTAddr + 0x7ff0);
  }
  case ELF::R_MIPS_SUB:
    return Value - Addend;
  case ELF::R_MIPS_HI16:
    // Get the higher 16-bits. Also add 1 if bit 15 is 1.
    return ((Value + Addend + 0x8000) >> 16) & 0xffff;
  case ELF::R_MIPS_LO16:
    return (Value + Addend) & 0xffff;
  case ELF::R_MIPS_CALL16:
  case ELF::R_MIPS_GOT_DISP:
  case ELF::R_MIPS_GOT_PAGE: {
    uint8_t *LocalGOTAddr =
        getSectionAddress(SectionToGOTMap[SectionID]) + SymOffset;
    uint64_t GOTEntry = readBytesUnaligned(LocalGOTAddr, 8);

    Value += Addend;
    if (Type == ELF::R_MIPS_GOT_PAGE)
      Value = (Value + 0x8000) & ~0xffff;

    if (GOTEntry)
      assert(GOTEntry == Value &&
             "GOT entry has two different addresses.");
    else
      writeBytesUnaligned(Value, LocalGOTAddr, 8);

    return (SymOffset - 0x7ff0) & 0xffff;
  }
  case ELF::R_MIPS_GOT_OFST: {
    int64_t page = (Value + Addend + 0x8000) & ~0xffff;
    return (Value + Addend - page) & 0xffff;
  }
  case ELF::R_MIPS_GPREL32: {
    uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
    return Value + Addend - (GOTAddr + 0x7ff0);
  }
  case ELF::R_MIPS_PC16: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - FinalAddress) >> 2) & 0xffff;
  }
  case ELF::R_MIPS_PC32: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return Value + Addend - FinalAddress;
  }
  case ELF::R_MIPS_PC18_S3: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - ((FinalAddress | 7) ^ 7)) >> 3) & 0x3ffff;
  }
  case ELF::R_MIPS_PC19_S2: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - FinalAddress) >> 2) & 0x7ffff;
  }
  case ELF::R_MIPS_PC21_S2: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - FinalAddress) >> 2) & 0x1fffff;
  }
  case ELF::R_MIPS_PC26_S2: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - FinalAddress) >> 2) & 0x3ffffff;
  }
  case ELF::R_MIPS_PCHI16: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - FinalAddress + 0x8000) >> 16) & 0xffff;
  }
  case ELF::R_MIPS_PCLO16: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return (Value + Addend - FinalAddress) & 0xffff;
  }
  }
  return 0;
}

void RuntimeDyldELF::applyMIPS64Relocation(uint8_t *TargetPtr,
                                           int64_t CalculatedValue,
                                           uint32_t Type) {
  uint32_t Insn = readBytesUnaligned(TargetPtr, 4);

  switch (Type) {
  default:
    break;
  case ELF::R_MIPS_32:
  case ELF::R_MIPS_GPREL32:
  case ELF::R_MIPS_PC32:
    writeBytesUnaligned(CalculatedValue & 0xffffffff, TargetPtr, 4);
    break;
  case ELF::R_MIPS_64:
  case ELF::R_MIPS_SUB:
    writeBytesUnaligned(CalculatedValue, TargetPtr, 8);
    break;
  case ELF::R_MIPS_26:
  case ELF::R_MIPS_PC26_S2:
    Insn = (Insn & 0xfc000000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_GPREL16:
    Insn = (Insn & 0xffff0000) | (CalculatedValue & 0xffff);
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_HI16:
  case ELF::R_MIPS_LO16:
  case ELF::R_MIPS_PCHI16:
  case ELF::R_MIPS_PCLO16:
  case ELF::R_MIPS_PC16:
  case ELF::R_MIPS_CALL16:
  case ELF::R_MIPS_GOT_DISP:
  case ELF::R_MIPS_GOT_PAGE:
  case ELF::R_MIPS_GOT_OFST:
    Insn = (Insn & 0xffff0000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_PC18_S3:
    Insn = (Insn & 0xfffc0000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_PC19_S2:
    Insn = (Insn & 0xfff80000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_PC21_S2:
    Insn = (Insn & 0xffe00000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  }
}

// Return the .TOC. section and offset.
void RuntimeDyldELF::findPPC64TOCSection(const ELFObjectFileBase &Obj,
                                         ObjSectionToIDMap &LocalSections,
                                         RelocationValueRef &Rel) {
  // Set a default SectionID in case we do not find a TOC section below.
  // This may happen for references to the TOC base (sym@toc, .opd
  // relocation) without a .toc directive. In this case just use the
  // first section (which is usually the .opd) since the code won't
  // reference the .toc base directly.
  Rel.SymbolName = NULL;
  Rel.SectionID = 0;

  // The TOC consists of sections .got, .toc, .tocbss, .plt in that
  // order. The TOC starts where the first of these sections starts.
  for (auto &Section : Obj.sections()) {
    StringRef SectionName;
    check(Section.getName(SectionName));

    if (SectionName == ".got"
        || SectionName == ".toc"
        || SectionName == ".tocbss"
        || SectionName == ".plt") {
      Rel.SectionID = findOrEmitSection(Obj, Section, false, LocalSections);
      break;
    }
  }

  // Per the ppc64-elf-linux ABI, the TOC base is the TOC value plus 0x8000,
  // thus permitting a full 64 Kbytes segment.
  Rel.Addend = 0x8000;
}

// Returns the section and offset associated with the OPD entry referenced
// by Symbol.
void RuntimeDyldELF::findOPDEntrySection(const ELFObjectFileBase &Obj,
                                         ObjSectionToIDMap &LocalSections,
                                         RelocationValueRef &Rel) {
  // Get the ELF symbol value (st_value) to compare with the relocation offset
  // in .opd entries.
  for (section_iterator si = Obj.section_begin(), se = Obj.section_end();
       si != se; ++si) {
    section_iterator RelSecI = si->getRelocatedSection();
    if (RelSecI == Obj.section_end())
      continue;

    StringRef RelSectionName;
    check(RelSecI->getName(RelSectionName));
    if (RelSectionName != ".opd")
      continue;

    for (elf_relocation_iterator i = si->relocation_begin(),
                                 e = si->relocation_end();
         i != e;) {
      // The R_PPC64_ADDR64 relocation indicates the first field
      // of a .opd entry.
      uint64_t TypeFunc = i->getType();
      if (TypeFunc != ELF::R_PPC64_ADDR64) {
        ++i;
        continue;
      }

      uint64_t TargetSymbolOffset = i->getOffset();
      symbol_iterator TargetSymbol = i->getSymbol();
      ErrorOr<int64_t> AddendOrErr = i->getAddend();
      Check(AddendOrErr.getError());
      int64_t Addend = *AddendOrErr;

      ++i;
      if (i == e)
        break;

      // Just check if the following relocation is a R_PPC64_TOC.
      uint64_t TypeTOC = i->getType();
      if (TypeTOC != ELF::R_PPC64_TOC)
        continue;

      // Finally compare the Symbol value and the target symbol offset
      // to check if this .opd entry refers to the symbol the relocation
      // points to.
      if (Rel.Addend != (int64_t)TargetSymbolOffset)
        continue;

      section_iterator tsi(Obj.section_end());
      check(TargetSymbol->getSection(tsi));
      bool IsCode = tsi->isText();
      Rel.SectionID = findOrEmitSection(Obj, (*tsi), IsCode, LocalSections);
      Rel.Addend = (intptr_t)Addend;
      return;
    }
  }
  llvm_unreachable("Attempting to get address of ODP entry!");
}

// Relocation masks following the #lo(value), #hi(value), #ha(value),
// #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
// document.

static inline uint16_t applyPPClo(uint64_t value) { return value & 0xffff; }

static inline uint16_t applyPPChi(uint64_t value) {
  return (value >> 16) & 0xffff;
}

static inline uint16_t applyPPCha(uint64_t value) {
  return ((value + 0x8000) >> 16) & 0xffff;
}

static inline uint16_t applyPPChigher(uint64_t value) {
  return (value >> 32) & 0xffff;
}

static inline uint16_t applyPPChighera(uint64_t value) {
  return ((value + 0x8000) >> 32) & 0xffff;
}

static inline uint16_t applyPPChighest(uint64_t value) {
  return (value >> 48) & 0xffff;
}

static inline uint16_t applyPPChighesta(uint64_t value) {
  return ((value + 0x8000) >> 48) & 0xffff;
}

void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
                                            uint64_t Offset, uint64_t Value,
                                            uint32_t Type, int64_t Addend) {
  uint8_t *LocalAddress = Section.Address + Offset;
  switch (Type) {
  default:
    llvm_unreachable("Relocation type not implemented yet!");
    break;
  case ELF::R_PPC64_ADDR16:
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_DS:
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
    break;
  case ELF::R_PPC64_ADDR16_LO:
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_LO_DS:
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
    break;
  case ELF::R_PPC64_ADDR16_HI:
    writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HA:
    writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHER:
    writeInt16BE(LocalAddress, applyPPChigher(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHERA:
    writeInt16BE(LocalAddress, applyPPChighera(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHEST:
    writeInt16BE(LocalAddress, applyPPChighest(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHESTA:
    writeInt16BE(LocalAddress, applyPPChighesta(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR14: {
    assert(((Value + Addend) & 3) == 0);
    // Preserve the AA/LK bits in the branch instruction
    uint8_t aalk = *(LocalAddress + 3);
    writeInt16BE(LocalAddress + 2, (aalk & 3) | ((Value + Addend) & 0xfffc));
  } break;
  case ELF::R_PPC64_REL16_LO: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt16BE(LocalAddress, applyPPClo(Delta));
  } break;
  case ELF::R_PPC64_REL16_HI: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt16BE(LocalAddress, applyPPChi(Delta));
  } break;
  case ELF::R_PPC64_REL16_HA: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt16BE(LocalAddress, applyPPCha(Delta));
  } break;
  case ELF::R_PPC64_ADDR32: {
    int32_t Result = static_cast<int32_t>(Value + Addend);
    if (SignExtend32<32>(Result) != Result)
      llvm_unreachable("Relocation R_PPC64_ADDR32 overflow");
    writeInt32BE(LocalAddress, Result);
  } break;
  case ELF::R_PPC64_REL24: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend);
    if (SignExtend32<24>(delta) != delta)
      llvm_unreachable("Relocation R_PPC64_REL24 overflow");
    // Generates a 'bl <address>' instruction
    writeInt32BE(LocalAddress, 0x48000001 | (delta & 0x03FFFFFC));
  } break;
  case ELF::R_PPC64_REL32: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend);
    if (SignExtend32<32>(delta) != delta)
      llvm_unreachable("Relocation R_PPC64_REL32 overflow");
    writeInt32BE(LocalAddress, delta);
  } break;
  case ELF::R_PPC64_REL64: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt64BE(LocalAddress, Delta);
  } break;
  case ELF::R_PPC64_ADDR64:
    writeInt64BE(LocalAddress, Value + Addend);
    break;
  }
}

void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
                                              uint64_t Offset, uint64_t Value,
                                              uint32_t Type, int64_t Addend) {
  uint8_t *LocalAddress = Section.Address + Offset;
  switch (Type) {
  default:
    llvm_unreachable("Relocation type not implemented yet!");
    break;
  case ELF::R_390_PC16DBL:
  case ELF::R_390_PLT16DBL: {
    int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
    assert(int16_t(Delta / 2) * 2 == Delta && "R_390_PC16DBL overflow");
    writeInt16BE(LocalAddress, Delta / 2);
    break;
  }
  case ELF::R_390_PC32DBL:
  case ELF::R_390_PLT32DBL: {
    int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
    assert(int32_t(Delta / 2) * 2 == Delta && "R_390_PC32DBL overflow");
    writeInt32BE(LocalAddress, Delta / 2);
    break;
  }
  case ELF::R_390_PC32: {
    int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
    assert(int32_t(Delta) == Delta && "R_390_PC32 overflow");
    writeInt32BE(LocalAddress, Delta);
    break;
  }
  case ELF::R_390_64:
    writeInt64BE(LocalAddress, Value + Addend);
    break;
  }
}

// The target location for the relocation is described by RE.SectionID and
// RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
// SectionEntry has three members describing its location.
// SectionEntry::Address is the address at which the section has been loaded
// into memory in the current (host) process. SectionEntry::LoadAddress is the
// address that the section will have in the target process.
// SectionEntry::ObjAddress is the address of the bits for this section in the
// original emitted object image (also in the current address space).
//
// Relocations will be applied as if the section were loaded at
// SectionEntry::LoadAddress, but they will be applied at an address based
// on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer to
// Target memory contents if they are required for value calculations.
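//
// For example, a PC-relative relocation written at (SectionEntry::Address +
// Offset) is computed against (SectionEntry::LoadAddress + Offset), so the
// patched bytes are already correct for the target once the section is copied
// to its load address.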
//
// The Value parameter here is the load address of the symbol for the
// relocation to be applied. For relocations which refer to symbols in the
// current object Value will be the LoadAddress of the section in which
// the symbol resides (RE.Addend provides additional information about the
// symbol location). For external symbols, Value will be the address of the
// symbol in the target address space.
void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE,
                                       uint64_t Value) {
  const SectionEntry &Section = Sections[RE.SectionID];
  return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
                           RE.SymOffset, RE.SectionID);
}

void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
                                       uint64_t Offset, uint64_t Value,
                                       uint32_t Type, int64_t Addend,
                                       uint64_t SymOffset, SID SectionID) {
  switch (Arch) {
  case Triple::x86_64:
    resolveX86_64Relocation(Section, Offset, Value, Type, Addend, SymOffset);
    break;
  case Triple::x86:
    resolveX86Relocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
                         (uint32_t)(Addend & 0xffffffffL));
    break;
  case Triple::aarch64:
  case Triple::aarch64_be:
    resolveAArch64Relocation(Section, Offset, Value, Type, Addend);
    break;
  case Triple::arm: // Fall through.
  case Triple::armeb:
  case Triple::thumb:
  case Triple::thumbeb:
    resolveARMRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
                         (uint32_t)(Addend & 0xffffffffL));
    break;
  case Triple::mips: // Fall through.
  case Triple::mipsel:
  case Triple::mips64:
  case Triple::mips64el:
    if (IsMipsO32ABI)
      resolveMIPSRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL),
                            Type, (uint32_t)(Addend & 0xffffffffL));
    else if (IsMipsN64ABI)
      resolveMIPS64Relocation(Section, Offset, Value, Type, Addend, SymOffset,
                              SectionID);
    else
      llvm_unreachable("Mips ABI not handled");
    break;
  case Triple::ppc64: // Fall through.
  case Triple::ppc64le:
    resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
    break;
  case Triple::systemz:
    resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
    break;
  default:
    llvm_unreachable("Unsupported CPU type!");
  }
}

void *RuntimeDyldELF::computePlaceholderAddress(unsigned SectionID,
                                                uint64_t Offset) const {
  return (void *)(Sections[SectionID].ObjAddress + Offset);
}

void RuntimeDyldELF::processSimpleRelocation(unsigned SectionID,
                                             uint64_t Offset, unsigned RelType,
                                             RelocationValueRef Value) {
  RelocationEntry RE(SectionID, Offset, RelType, Value.Addend, Value.Offset);
  if (Value.SymbolName)
    addRelocationForSymbol(RE, Value.SymbolName);
  else
    addRelocationForSection(RE, Value.SectionID);
}

relocation_iterator RuntimeDyldELF::processRelocationRef(
    unsigned SectionID, relocation_iterator RelI, const ObjectFile &O,
    ObjSectionToIDMap &ObjSectionToID, StubMap &Stubs) {
  const auto &Obj = cast<ELFObjectFileBase>(O);
  uint64_t RelType = RelI->getType();
  ErrorOr<int64_t> AddendOrErr = ELFRelocationRef(*RelI).getAddend();
  int64_t Addend = AddendOrErr ? *AddendOrErr : 0;
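  // SHT_REL sections carry no explicit addend, so getAddend() fails for them
  // and we default to zero here; the implicit addend stored in the instruction
  // bytes is folded in later via computePlaceholderAddress.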
  elf_symbol_iterator Symbol = RelI->getSymbol();

  // Obtain the symbol name which is referenced in the relocation
  StringRef TargetName;
  if (Symbol != Obj.symbol_end())
    Symbol->getName(TargetName);
  DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
               << " TargetName: " << TargetName << "\n");
  RelocationValueRef Value;
  // First search for the symbol in the local symbol table
  SymbolRef::Type SymType = SymbolRef::ST_Unknown;

  // Search for the symbol in the global symbol table
  RTDyldSymbolTable::const_iterator gsi = GlobalSymbolTable.end();
  if (Symbol != Obj.symbol_end()) {
    gsi = GlobalSymbolTable.find(TargetName.data());
    SymType = Symbol->getType();
  }
  if (gsi != GlobalSymbolTable.end()) {
    const auto &SymInfo = gsi->second;
    Value.SectionID = SymInfo.getSectionID();
    Value.Offset = SymInfo.getOffset();
    Value.Addend = SymInfo.getOffset() + Addend;
  } else {
    switch (SymType) {
    case SymbolRef::ST_Debug: {
      // TODO: ST_Debug currently corresponds to STT_SECTION for ELF, but that
      // mapping is not obvious and may change. The cleanest fix would be to
      // add a dedicated ST_Section type to SymbolRef and use it here.
      section_iterator si(Obj.section_end());
      Symbol->getSection(si);
      if (si == Obj.section_end())
        llvm_unreachable("Symbol section not found, bad object file format!");
      DEBUG(dbgs() << "\t\tThis is section symbol\n");
      bool isCode = si->isText();
      Value.SectionID = findOrEmitSection(Obj, (*si), isCode, ObjSectionToID);
      Value.Addend = Addend;
      break;
    }
    case SymbolRef::ST_Data:
    case SymbolRef::ST_Unknown: {
      Value.SymbolName = TargetName.data();
      Value.Addend = Addend;

      // Absolute relocations will have a zero symbol ID (STN_UNDEF), which
      // will manifest here as a NULL symbol name.
      // We can set this as a valid (but empty) symbol name, and rely
      // on addRelocationForSymbol to handle this.
      if (!Value.SymbolName)
        Value.SymbolName = "";
      break;
    }
    default:
      llvm_unreachable("Unresolved symbol type!");
      break;
    }
  }

  uint64_t Offset = RelI->getOffset();

  DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset
               << "\n");
  if ((Arch == Triple::aarch64 || Arch == Triple::aarch64_be) &&
      (RelType == ELF::R_AARCH64_CALL26 || RelType == ELF::R_AARCH64_JUMP26)) {
    // This is an AArch64 branch relocation, need to use a stub function.
    DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
    SectionEntry &Section = Sections[SectionID];

    // Look for an existing stub.
    StubMap::const_iterator i = Stubs.find(Value);
    if (i != Stubs.end()) {
      resolveRelocation(Section, Offset, (uint64_t)Section.Address + i->second,
                        RelType, 0);
      DEBUG(dbgs() << " Stub function found\n");
    } else {
      // Create a new stub function.
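      // The stub emitted by createStubFunction materializes the full 64-bit
      // target address; the MOVW_UABS_G3..G0_NC relocations below patch its
      // four 16-bit immediate fields.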
      DEBUG(dbgs() << " Create a new stub function\n");
      Stubs[Value] = Section.StubOffset;
      uint8_t *StubTargetAddr =
          createStubFunction(Section.Address + Section.StubOffset);

      RelocationEntry REmovz_g3(SectionID, StubTargetAddr - Section.Address,
                                ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend);
      RelocationEntry REmovk_g2(SectionID, StubTargetAddr - Section.Address + 4,
                                ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend);
      RelocationEntry REmovk_g1(SectionID, StubTargetAddr - Section.Address + 8,
                                ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend);
      RelocationEntry REmovk_g0(SectionID,
                                StubTargetAddr - Section.Address + 12,
                                ELF::R_AARCH64_MOVW_UABS_G0_NC, Value.Addend);

      if (Value.SymbolName) {
        addRelocationForSymbol(REmovz_g3, Value.SymbolName);
        addRelocationForSymbol(REmovk_g2, Value.SymbolName);
        addRelocationForSymbol(REmovk_g1, Value.SymbolName);
        addRelocationForSymbol(REmovk_g0, Value.SymbolName);
      } else {
        addRelocationForSection(REmovz_g3, Value.SectionID);
        addRelocationForSection(REmovk_g2, Value.SectionID);
        addRelocationForSection(REmovk_g1, Value.SectionID);
        addRelocationForSection(REmovk_g0, Value.SectionID);
      }
      resolveRelocation(Section, Offset,
                        (uint64_t)Section.Address + Section.StubOffset, RelType,
                        0);
      Section.StubOffset += getMaxStubSize();
    }
  } else if (Arch == Triple::arm) {
    if (RelType == ELF::R_ARM_PC24 || RelType == ELF::R_ARM_CALL ||
        RelType == ELF::R_ARM_JUMP24) {
      // This is an ARM branch relocation, need to use a stub function.
      DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.");
      SectionEntry &Section = Sections[SectionID];

      // Look for an existing stub.
      StubMap::const_iterator i = Stubs.find(Value);
      if (i != Stubs.end()) {
        resolveRelocation(Section, Offset,
                          (uint64_t)Section.Address + i->second, RelType, 0);
        DEBUG(dbgs() << " Stub function found\n");
      } else {
        // Create a new stub function.
        DEBUG(dbgs() << " Create a new stub function\n");
        Stubs[Value] = Section.StubOffset;
        uint8_t *StubTargetAddr =
            createStubFunction(Section.Address + Section.StubOffset);
        RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
                           ELF::R_ARM_ABS32, Value.Addend);
        if (Value.SymbolName)
          addRelocationForSymbol(RE, Value.SymbolName);
        else
          addRelocationForSection(RE, Value.SectionID);

        resolveRelocation(Section, Offset,
                          (uint64_t)Section.Address + Section.StubOffset,
                          RelType, 0);
        Section.StubOffset += getMaxStubSize();
      }
    } else {
      uint32_t *Placeholder = reinterpret_cast<uint32_t *>(
          computePlaceholderAddress(SectionID, Offset));
      if (RelType == ELF::R_ARM_PREL31 || RelType == ELF::R_ARM_TARGET1 ||
          RelType == ELF::R_ARM_ABS32) {
        Value.Addend += *Placeholder;
      } else if (RelType == ELF::R_ARM_MOVW_ABS_NC ||
                 RelType == ELF::R_ARM_MOVT_ABS) {
        // See ELF for ARM documentation
        Value.Addend += (int16_t)((*Placeholder & 0xFFF) |
                                  (((*Placeholder >> 16) & 0xF) << 12));
      }
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    }
  } else if (IsMipsO32ABI) {
    uint8_t *Placeholder = reinterpret_cast<uint8_t *>(
        computePlaceholderAddress(SectionID, Offset));
    uint32_t Opcode = readBytesUnaligned(Placeholder, 4);
    if (RelType == ELF::R_MIPS_26) {
      // This is a Mips branch relocation, need to use a stub function.
      DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
      SectionEntry &Section = Sections[SectionID];

      // Extract the addend from the instruction.
      // We shift up by two since the Value will be down shifted again
      // when applying the relocation.
      uint32_t Addend = (Opcode & 0x03ffffff) << 2;

      Value.Addend += Addend;

      // Look for an existing stub.
      StubMap::const_iterator i = Stubs.find(Value);
      if (i != Stubs.end()) {
        RelocationEntry RE(SectionID, Offset, RelType, i->second);
        addRelocationForSection(RE, SectionID);
        DEBUG(dbgs() << " Stub function found\n");
      } else {
        // Create a new stub function.
        DEBUG(dbgs() << " Create a new stub function\n");
        Stubs[Value] = Section.StubOffset;
        uint8_t *StubTargetAddr =
            createStubFunction(Section.Address + Section.StubOffset);

        // Creating Hi and Lo relocations for the filled stub instructions.
        RelocationEntry REHi(SectionID, StubTargetAddr - Section.Address,
                             ELF::R_MIPS_HI16, Value.Addend);
        RelocationEntry RELo(SectionID, StubTargetAddr - Section.Address + 4,
                             ELF::R_MIPS_LO16, Value.Addend);

        if (Value.SymbolName) {
          addRelocationForSymbol(REHi, Value.SymbolName);
          addRelocationForSymbol(RELo, Value.SymbolName);
        } else {
          addRelocationForSection(REHi, Value.SectionID);
          addRelocationForSection(RELo, Value.SectionID);
        }

        RelocationEntry RE(SectionID, Offset, RelType, Section.StubOffset);
        addRelocationForSection(RE, SectionID);
        Section.StubOffset += getMaxStubSize();
      }
    } else {
      if (RelType == ELF::R_MIPS_HI16)
        Value.Addend += (Opcode & 0x0000ffff) << 16;
      else if (RelType == ELF::R_MIPS_LO16)
        Value.Addend += (Opcode & 0x0000ffff);
      else if (RelType == ELF::R_MIPS_32)
        Value.Addend += Opcode;
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    }
  } else if (IsMipsN64ABI) {
    uint32_t r_type = RelType & 0xff;
    RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
    if (r_type == ELF::R_MIPS_CALL16 || r_type == ELF::R_MIPS_GOT_PAGE
        || r_type == ELF::R_MIPS_GOT_DISP) {
      StringMap<uint64_t>::iterator i = GOTSymbolOffsets.find(TargetName);
      if (i != GOTSymbolOffsets.end())
        RE.SymOffset = i->second;
      else {
        RE.SymOffset = allocateGOTEntries(SectionID, 1);
        GOTSymbolOffsets[TargetName] = RE.SymOffset;
      }
    }
    if (Value.SymbolName)
      addRelocationForSymbol(RE, Value.SymbolName);
    else
      addRelocationForSection(RE, Value.SectionID);
  } else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
    if (RelType == ELF::R_PPC64_REL24) {
      // Determine ABI variant in use for this object.
      unsigned AbiVariant;
      Obj.getPlatformFlags(AbiVariant);
      AbiVariant &= ELF::EF_PPC64_ABI;
      // A PPC branch relocation will need a stub function if the target is
      // an external symbol (Symbol::ST_Unknown) or if the target address
      // is not within range of a signed 24-bit branch.
      SectionEntry &Section = Sections[SectionID];
      uint8_t *Target = Section.Address + Offset;
      bool RangeOverflow = false;
      if (SymType != SymbolRef::ST_Unknown) {
        if (AbiVariant != 2) {
          // In the ELFv1 ABI, a function call may point to the .opd entry,
          // so the final symbol value is calculated based on the relocation
          // values in the .opd section.
          findOPDEntrySection(Obj, ObjSectionToID, Value);
        } else {
          // In the ELFv2 ABI, a function symbol may provide a local entry
          // point, which must be used for direct calls.
          uint8_t SymOther = Symbol->getOther();
          Value.Addend += ELF::decodePPC64LocalEntryOffset(SymOther);
        }
        uint8_t *RelocTarget = Sections[Value.SectionID].Address + Value.Addend;
        int32_t delta = static_cast<int32_t>(Target - RelocTarget);
        // If it is within the 24-bit branch range, just set the branch target
        if (SignExtend32<24>(delta) == delta) {
          RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
          if (Value.SymbolName)
            addRelocationForSymbol(RE, Value.SymbolName);
          else
            addRelocationForSection(RE, Value.SectionID);
        } else {
          RangeOverflow = true;
        }
      }
      if (SymType == SymbolRef::ST_Unknown || RangeOverflow) {
        // It is an external symbol (SymbolRef::ST_Unknown) or the target is
        // out of the 24-bit branch range.
        StubMap::const_iterator i = Stubs.find(Value);
        if (i != Stubs.end()) {
          // Symbol function stub already created, just relocate to it
          resolveRelocation(Section, Offset,
                            (uint64_t)Section.Address + i->second, RelType, 0);
          DEBUG(dbgs() << " Stub function found\n");
        } else {
          // Create a new stub function.
          DEBUG(dbgs() << " Create a new stub function\n");
          Stubs[Value] = Section.StubOffset;
          uint8_t *StubTargetAddr =
              createStubFunction(Section.Address + Section.StubOffset,
                                 AbiVariant);
          RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
                             ELF::R_PPC64_ADDR64, Value.Addend);

          // Generate the 64-bit address load sequence exemplified in section
          // 4.5.1 of the PPC64 ELF ABI. Note that the relocations apply to
          // the low halfword of each instruction, so we have to adjust
          // the offset according to the target endianness.
          uint64_t StubRelocOffset = StubTargetAddr - Section.Address;
          if (!IsTargetLittleEndian)
            StubRelocOffset += 2;

          RelocationEntry REhst(SectionID, StubRelocOffset + 0,
                                ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
          RelocationEntry REhr(SectionID, StubRelocOffset + 4,
                               ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
          RelocationEntry REh(SectionID, StubRelocOffset + 12,
                              ELF::R_PPC64_ADDR16_HI, Value.Addend);
          RelocationEntry REl(SectionID, StubRelocOffset + 16,
                              ELF::R_PPC64_ADDR16_LO, Value.Addend);

          if (Value.SymbolName) {
            addRelocationForSymbol(REhst, Value.SymbolName);
            addRelocationForSymbol(REhr, Value.SymbolName);
            addRelocationForSymbol(REh, Value.SymbolName);
            addRelocationForSymbol(REl, Value.SymbolName);
          } else {
            addRelocationForSection(REhst, Value.SectionID);
            addRelocationForSection(REhr, Value.SectionID);
            addRelocationForSection(REh, Value.SectionID);
            addRelocationForSection(REl, Value.SectionID);
          }

          resolveRelocation(Section, Offset,
                            (uint64_t)Section.Address + Section.StubOffset,
                            RelType, 0);
          Section.StubOffset += getMaxStubSize();
        }
        if (SymType == SymbolRef::ST_Unknown) {
          // Restore the TOC for external calls
          if (AbiVariant == 2)
            writeInt32BE(Target + 4, 0xE8410018); // ld r2,24(r1)
          else
            writeInt32BE(Target + 4, 0xE8410028); // ld r2,40(r1)
        }
      }
    } else if (RelType == ELF::R_PPC64_TOC16 ||
               RelType == ELF::R_PPC64_TOC16_DS ||
               RelType == ELF::R_PPC64_TOC16_LO ||
               RelType == ELF::R_PPC64_TOC16_LO_DS ||
               RelType == ELF::R_PPC64_TOC16_HI ||
               RelType == ELF::R_PPC64_TOC16_HA) {
      // These relocations are supposed to subtract the TOC address from
      // the final value. This does not fit cleanly into the RuntimeDyld
      // scheme, since there may be *two* sections involved in determining
      // the relocation value (the section of the symbol referred to by the
      // relocation, and the TOC section associated with the current module).
      //
      // Fortunately, these relocations are currently only ever generated
      // referring to symbols that themselves reside in the TOC, which means
      // that the two sections are actually the same. Thus they cancel out
      // and we can immediately resolve the relocation right now.
      switch (RelType) {
      case ELF::R_PPC64_TOC16: RelType = ELF::R_PPC64_ADDR16; break;
      case ELF::R_PPC64_TOC16_DS: RelType = ELF::R_PPC64_ADDR16_DS; break;
      case ELF::R_PPC64_TOC16_LO: RelType = ELF::R_PPC64_ADDR16_LO; break;
      case ELF::R_PPC64_TOC16_LO_DS: RelType = ELF::R_PPC64_ADDR16_LO_DS; break;
      case ELF::R_PPC64_TOC16_HI: RelType = ELF::R_PPC64_ADDR16_HI; break;
      case ELF::R_PPC64_TOC16_HA: RelType = ELF::R_PPC64_ADDR16_HA; break;
      default: llvm_unreachable("Wrong relocation type.");
      }

      RelocationValueRef TOCValue;
      findPPC64TOCSection(Obj, ObjSectionToID, TOCValue);
      if (Value.SymbolName || Value.SectionID != TOCValue.SectionID)
        llvm_unreachable("Unsupported TOC relocation.");
      Value.Addend -= TOCValue.Addend;
      resolveRelocation(Sections[SectionID], Offset, Value.Addend, RelType, 0);
    } else {
      // There are two ways to refer to the TOC address directly: either
      // via an ELF::R_PPC64_TOC relocation (where both symbol and addend are
      // ignored), or via any relocation that refers to the magic ".TOC."
      // symbol (in which case the addend is respected).
      if (RelType == ELF::R_PPC64_TOC) {
        RelType = ELF::R_PPC64_ADDR64;
        findPPC64TOCSection(Obj, ObjSectionToID, Value);
      } else if (TargetName == ".TOC.") {
        findPPC64TOCSection(Obj, ObjSectionToID, Value);
        Value.Addend += Addend;
      }

      RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);

      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    }
  } else if (Arch == Triple::systemz &&
             (RelType == ELF::R_390_PLT32DBL || RelType == ELF::R_390_GOTENT)) {
    // Create function stubs for both PLT and GOT references, regardless of
    // whether the GOT reference is to data or code. The stub contains the
    // full address of the symbol, as needed by GOT references, and the
    // executable part only adds an overhead of 8 bytes.
    //
    // We could try to conserve space by allocating the code and data
    // parts of the stub separately. However, as things stand, we allocate
    // a stub for every relocation, so using a GOT in JIT code should be
    // no less space efficient than using an explicit constant pool.
    DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
    SectionEntry &Section = Sections[SectionID];

    // Look for an existing stub.
    StubMap::const_iterator i = Stubs.find(Value);
    uintptr_t StubAddress;
    if (i != Stubs.end()) {
      StubAddress = uintptr_t(Section.Address) + i->second;
      DEBUG(dbgs() << " Stub function found\n");
    } else {
      // Create a new stub function.
      DEBUG(dbgs() << " Create a new stub function\n");

      uintptr_t BaseAddress = uintptr_t(Section.Address);
      uintptr_t StubAlignment = getStubAlignment();
      StubAddress = (BaseAddress + Section.StubOffset + StubAlignment - 1) &
                    -StubAlignment;
      unsigned StubOffset = StubAddress - BaseAddress;

      Stubs[Value] = StubOffset;
      createStubFunction((uint8_t *)StubAddress);
      RelocationEntry RE(SectionID, StubOffset + 8, ELF::R_390_64,
                         Value.Offset);
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
      Section.StubOffset = StubOffset + getMaxStubSize();
    }

    if (RelType == ELF::R_390_GOTENT)
      resolveRelocation(Section, Offset, StubAddress + 8, ELF::R_390_PC32DBL,
                        Addend);
    else
      resolveRelocation(Section, Offset, StubAddress, RelType, Addend);
  } else if (Arch == Triple::x86_64) {
    if (RelType == ELF::R_X86_64_PLT32) {
      // The way the PLT relocations normally work is that the linker allocates
      // the PLT and this relocation makes a PC-relative call into the PLT.
      // The PLT entry will then jump to an address provided by the GOT. On
      // first call, the GOT address will point back into PLT code that
      // resolves the symbol. After the first call, the GOT entry points to
      // the actual function.
      //
      // For local functions we're ignoring all of that here and just replacing
      // the PLT32 relocation type with PC32, which will translate the
      // relocation into a PC-relative call directly to the function. For
      // external symbols we can't be sure the function will be within 2^32
      // bytes of the call site, so we need to create a stub, which calls into
      // the GOT.
      if (Value.SymbolName) {
        // This is a call to an external function.
        // Look for an existing stub.
        SectionEntry &Section = Sections[SectionID];
        StubMap::const_iterator i = Stubs.find(Value);
        uintptr_t StubAddress;
        if (i != Stubs.end()) {
          StubAddress = uintptr_t(Section.Address) + i->second;
          DEBUG(dbgs() << " Stub function found\n");
        } else {
          // Create a new stub function (equivalent to a PLT entry).
          DEBUG(dbgs() << " Create a new stub function\n");

          uintptr_t BaseAddress = uintptr_t(Section.Address);
          uintptr_t StubAlignment = getStubAlignment();
          StubAddress = (BaseAddress + Section.StubOffset + StubAlignment - 1) &
                        -StubAlignment;
          unsigned StubOffset = StubAddress - BaseAddress;
          Stubs[Value] = StubOffset;
          createStubFunction((uint8_t *)StubAddress);

          // Bump our stub offset counter.
          Section.StubOffset = StubOffset + getMaxStubSize();

          // Allocate a GOT entry for the target symbol.
          uint64_t GOTOffset = allocateGOTEntries(SectionID, 1);

          // The load of the GOT address has an addend of -4.
          resolveGOTOffsetRelocation(SectionID, StubOffset + 2, GOTOffset - 4);

          // Fill in the value of the symbol we're targeting into the GOT.
          addRelocationForSymbol(
              computeGOTOffsetRE(SectionID, GOTOffset, 0, ELF::R_X86_64_64),
              Value.SymbolName);
        }

        // Make the target call a call into the stub table.
        resolveRelocation(Section, Offset, StubAddress, ELF::R_X86_64_PC32,
                          Addend);
      } else {
        RelocationEntry RE(SectionID, Offset, ELF::R_X86_64_PC32, Value.Addend,
                           Value.Offset);
        addRelocationForSection(RE, Value.SectionID);
      }
    } else if (RelType == ELF::R_X86_64_GOTPCREL) {
      uint64_t GOTOffset = allocateGOTEntries(SectionID, 1);
      resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend);

      // Fill in the value of the symbol we're targeting into the GOT.
      RelocationEntry RE = computeGOTOffsetRE(SectionID, GOTOffset,
                                              Value.Offset, ELF::R_X86_64_64);
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    } else if (RelType == ELF::R_X86_64_PC32) {
      Value.Addend += support::ulittle32_t::ref(
          computePlaceholderAddress(SectionID, Offset));
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    } else if (RelType == ELF::R_X86_64_PC64) {
      Value.Addend += support::ulittle64_t::ref(
          computePlaceholderAddress(SectionID, Offset));
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    } else {
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    }
  } else {
    if (Arch == Triple::x86) {
      Value.Addend += support::ulittle32_t::ref(
          computePlaceholderAddress(SectionID, Offset));
    }
    processSimpleRelocation(SectionID, Offset, RelType, Value);
  }
  return ++RelI;
}

size_t RuntimeDyldELF::getGOTEntrySize() {
  // We don't use the GOT in all of these cases, but it's essentially free
  // to put them all here.
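  //
  // A GOT entry holds a single target pointer, so its size follows the
  // pointer width of the target ABI.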
  size_t Result = 0;
  switch (Arch) {
  case Triple::x86_64:
  case Triple::aarch64:
  case Triple::aarch64_be:
  case Triple::ppc64:
  case Triple::ppc64le:
  case Triple::systemz:
    Result = sizeof(uint64_t);
    break;
  case Triple::x86:
  case Triple::arm:
  case Triple::thumb:
    Result = sizeof(uint32_t);
    break;
  case Triple::mips:
  case Triple::mipsel:
  case Triple::mips64:
  case Triple::mips64el:
    if (IsMipsO32ABI)
      Result = sizeof(uint32_t);
    else if (IsMipsN64ABI)
      Result = sizeof(uint64_t);
    else
      llvm_unreachable("Mips ABI not handled");
    break;
  default:
    llvm_unreachable("Unsupported CPU type!");
  }
  return Result;
}

uint64_t RuntimeDyldELF::allocateGOTEntries(unsigned SectionID, unsigned no) {
  // The GOT section is the same for all sections in the object file.
  (void)SectionID;
  if (GOTSectionID == 0) {
    GOTSectionID = Sections.size();
    // Reserve a section id. We'll allocate the section later
    // once we know the total size.
    Sections.push_back(SectionEntry(".got", 0, 0, 0));
  }
  uint64_t StartOffset = CurrentGOTIndex * getGOTEntrySize();
  CurrentGOTIndex += no;
  return StartOffset;
}

void RuntimeDyldELF::resolveGOTOffsetRelocation(unsigned SectionID,
                                                uint64_t Offset,
                                                uint64_t GOTOffset) {
  // Fill in the relative address of the GOT entry into the stub.
  RelocationEntry GOTRE(SectionID, Offset, ELF::R_X86_64_PC32, GOTOffset);
  addRelocationForSection(GOTRE, GOTSectionID);
}

RelocationEntry RuntimeDyldELF::computeGOTOffsetRE(unsigned SectionID,
                                                   uint64_t GOTOffset,
                                                   uint64_t SymbolOffset,
                                                   uint32_t Type) {
  // The GOT section is the same for all sections in the object file.
  (void)SectionID;
  return RelocationEntry(GOTSectionID, GOTOffset, Type, SymbolOffset);
}

void RuntimeDyldELF::finalizeLoad(const ObjectFile &Obj,
                                  ObjSectionToIDMap &SectionMap) {
  // If necessary, allocate the global offset table.
  if (GOTSectionID != 0) {
    // Allocate memory for the section.
    size_t TotalSize = CurrentGOTIndex * getGOTEntrySize();
    uint8_t *Addr = MemMgr.allocateDataSection(TotalSize, getGOTEntrySize(),
                                               GOTSectionID, ".got", false);
    if (!Addr)
      report_fatal_error("Unable to allocate memory for GOT!");

    Sections[GOTSectionID] = SectionEntry(".got", Addr, TotalSize, 0);

    if (Checker)
      Checker->registerSection(Obj.getFileName(), GOTSectionID);

    // For now, initialize all GOT entries to zero. We'll fill them in as
    // needed when GOT-based relocations are applied.
    memset(Addr, 0, TotalSize);
    if (IsMipsN64ABI) {
      // To correctly resolve Mips GOT relocations, we need a mapping from
      // the object's sections to GOTs.
      for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
           SI != SE; ++SI) {
        if (SI->relocation_begin() != SI->relocation_end()) {
          section_iterator RelocatedSection = SI->getRelocatedSection();
          ObjSectionToIDMap::iterator i = SectionMap.find(*RelocatedSection);
          assert(i != SectionMap.end());
          SectionToGOTMap[i->second] = GOTSectionID;
        }
      }
      GOTSymbolOffsets.clear();
    }
  }

  // Look for and record the EH frame section.
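  // Only the first .eh_frame section found is recorded here; it is registered
  // later by registerEHFrames().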
  ObjSectionToIDMap::iterator i, e;
  for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
    const SectionRef &Section = i->first;
    StringRef Name;
    Section.getName(Name);
    if (Name == ".eh_frame") {
      UnregisteredEHFrameSections.push_back(i->second);
      break;
    }
  }

  GOTSectionID = 0;
  CurrentGOTIndex = 0;
}

bool RuntimeDyldELF::isCompatibleFile(const object::ObjectFile &Obj) const {
  return Obj.isELF();
}

} // namespace llvm