//===-- RuntimeDyldELF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementation of ELF support for the MC-JIT runtime dynamic linker.
//
//===----------------------------------------------------------------------===//

#include "RuntimeDyldELF.h"
#include "RuntimeDyldCheckerImpl.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;
using namespace llvm::object;

#define DEBUG_TYPE "dyld"

static inline std::error_code check(std::error_code Err) {
  if (Err) {
    report_fatal_error(Err.message());
  }
  return Err;
}

namespace {

template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> {
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)

  typedef Elf_Shdr_Impl<ELFT> Elf_Shdr;
  typedef Elf_Sym_Impl<ELFT> Elf_Sym;
  typedef Elf_Rel_Impl<ELFT, false> Elf_Rel;
  typedef Elf_Rel_Impl<ELFT, true> Elf_Rela;

  typedef Elf_Ehdr_Impl<ELFT> Elf_Ehdr;

  typedef typename ELFDataTypeTypedefHelper<ELFT>::value_type addr_type;

public:
  DyldELFObject(MemoryBufferRef Wrapper, std::error_code &ec);

  void updateSectionAddress(const SectionRef &Sec, uint64_t Addr);

  void updateSymbolAddress(const SymbolRef &SymRef, uint64_t Addr);

  // Methods for type inquiry through isa, cast and dyn_cast
  static inline bool classof(const Binary *v) {
    return (isa<ELFObjectFile<ELFT>>(v) &&
            classof(cast<ELFObjectFile<ELFT>>(v)));
  }
  static inline bool classof(const ELFObjectFile<ELFT> *v) {
    return v->isDyldType();
  }
};

// The MemoryBuffer passed into this constructor is just a wrapper around the
// actual memory. Ultimately, the Binary parent class will take ownership of
// this MemoryBuffer object but not the underlying memory.
template <class ELFT>
DyldELFObject<ELFT>::DyldELFObject(MemoryBufferRef Wrapper, std::error_code &EC)
    : ELFObjectFile<ELFT>(Wrapper, EC) {
  this->isDyldELFObject = true;
}

template <class ELFT>
void DyldELFObject<ELFT>::updateSectionAddress(const SectionRef &Sec,
                                               uint64_t Addr) {
  DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
  Elf_Shdr *shdr =
      const_cast<Elf_Shdr *>(reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));

  // This assumes the address passed in matches the target address bitness.
  // The template-based type cast handles everything else.
  shdr->sh_addr = static_cast<addr_type>(Addr);
}

template <class ELFT>
void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef,
                                              uint64_t Addr) {

  Elf_Sym *sym = const_cast<Elf_Sym *>(
      ELFObjectFile<ELFT>::getSymbol(SymRef.getRawDataRefImpl()));

  // This assumes the address passed in matches the target address bitness.
  // The template-based type cast handles everything else.
  sym->st_value = static_cast<addr_type>(Addr);
}

class LoadedELFObjectInfo final
    : public RuntimeDyld::LoadedObjectInfoHelper<LoadedELFObjectInfo> {
public:
  LoadedELFObjectInfo(RuntimeDyldImpl &RTDyld, ObjSectionToIDMap ObjSecToIDMap)
      : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}

  OwningBinary<ObjectFile>
  getObjectForDebug(const ObjectFile &Obj) const override;
};

template <typename ELFT>
std::unique_ptr<DyldELFObject<ELFT>>
createRTDyldELFObject(MemoryBufferRef Buffer, const ObjectFile &SourceObject,
                      const LoadedELFObjectInfo &L, std::error_code &ec) {
  typedef typename ELFFile<ELFT>::Elf_Shdr Elf_Shdr;
  typedef typename ELFDataTypeTypedefHelper<ELFT>::value_type addr_type;

  std::unique_ptr<DyldELFObject<ELFT>> Obj =
      llvm::make_unique<DyldELFObject<ELFT>>(Buffer, ec);

  // Iterate over all sections in the object.
  auto SI = SourceObject.section_begin();
  for (const auto &Sec : Obj->sections()) {
    StringRef SectionName;
    Sec.getName(SectionName);
    if (SectionName != "") {
      DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
      Elf_Shdr *shdr = const_cast<Elf_Shdr *>(
          reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));

      if (uint64_t SecLoadAddr = L.getSectionLoadAddress(*SI)) {
        // This assumes that the address passed in matches the target address
        // bitness. The template-based type cast handles everything else.
        shdr->sh_addr = static_cast<addr_type>(SecLoadAddr);
      }
    }
    ++SI;
  }

  return Obj;
}

OwningBinary<ObjectFile> createELFDebugObject(const ObjectFile &Obj,
                                              const LoadedELFObjectInfo &L) {
  assert(Obj.isELF() && "Not an ELF object file.");

  std::unique_ptr<MemoryBuffer> Buffer =
      MemoryBuffer::getMemBufferCopy(Obj.getData(), Obj.getFileName());

  std::error_code ec;

  std::unique_ptr<ObjectFile> DebugObj;
  if (Obj.getBytesInAddress() == 4 && Obj.isLittleEndian()) {
    typedef ELFType<support::little, false> ELF32LE;
    DebugObj =
        createRTDyldELFObject<ELF32LE>(Buffer->getMemBufferRef(), Obj, L, ec);
  } else if (Obj.getBytesInAddress() == 4 && !Obj.isLittleEndian()) {
    typedef ELFType<support::big, false> ELF32BE;
    DebugObj =
        createRTDyldELFObject<ELF32BE>(Buffer->getMemBufferRef(), Obj, L, ec);
  } else if (Obj.getBytesInAddress() == 8 && !Obj.isLittleEndian()) {
    typedef ELFType<support::big, true> ELF64BE;
    DebugObj =
        createRTDyldELFObject<ELF64BE>(Buffer->getMemBufferRef(), Obj, L, ec);
  } else if (Obj.getBytesInAddress() == 8 && Obj.isLittleEndian()) {
    typedef ELFType<support::little, true> ELF64LE;
    DebugObj =
        createRTDyldELFObject<ELF64LE>(Buffer->getMemBufferRef(), Obj, L, ec);
  } else
    llvm_unreachable("Unexpected ELF format");

  assert(!ec && "Could not construct copy ELF object file");

  return OwningBinary<ObjectFile>(std::move(DebugObj), std::move(Buffer));
}

OwningBinary<ObjectFile>
LoadedELFObjectInfo::getObjectForDebug(const ObjectFile &Obj) const {
  return createELFDebugObject(Obj, *this);
}

} // namespace

namespace llvm {

RuntimeDyldELF::RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr,
                               RuntimeDyld::SymbolResolver &Resolver)
    : RuntimeDyldImpl(MemMgr, Resolver), GOTSectionID(0), CurrentGOTIndex(0) {}
RuntimeDyldELF::~RuntimeDyldELF() {}

void RuntimeDyldELF::registerEHFrames() {
  for (int i = 0, e = UnregisteredEHFrameSections.size(); i != e; ++i) {
    SID EHFrameSID = UnregisteredEHFrameSections[i];
    uint8_t *EHFrameAddr = Sections[EHFrameSID].Address;
    uint64_t EHFrameLoadAddr = Sections[EHFrameSID].LoadAddress;
    size_t EHFrameSize = Sections[EHFrameSID].Size;
    MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
    RegisteredEHFrameSections.push_back(EHFrameSID);
  }
  UnregisteredEHFrameSections.clear();
}

void RuntimeDyldELF::deregisterEHFrames() {
  for (int i = 0, e = RegisteredEHFrameSections.size(); i != e; ++i) {
    SID EHFrameSID = RegisteredEHFrameSections[i];
    uint8_t *EHFrameAddr = Sections[EHFrameSID].Address;
    uint64_t EHFrameLoadAddr = Sections[EHFrameSID].LoadAddress;
    size_t EHFrameSize = Sections[EHFrameSID].Size;
    MemMgr.deregisterEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
  }
  RegisteredEHFrameSections.clear();
}

std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
RuntimeDyldELF::loadObject(const object::ObjectFile &O) {
  return llvm::make_unique<LoadedELFObjectInfo>(*this, loadObjectImpl(O));
}

void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
                                             uint64_t Offset, uint64_t Value,
                                             uint32_t Type, int64_t Addend,
                                             uint64_t SymOffset) {
  switch (Type) {
  default:
    llvm_unreachable("Relocation type not implemented yet!");
    break;
  case ELF::R_X86_64_64: {
    support::ulittle64_t::ref(Section.Address + Offset) = Value + Addend;
    DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
                 << format("%p\n", Section.Address + Offset));
    break;
  }
  case ELF::R_X86_64_32:
  case ELF::R_X86_64_32S: {
    Value += Addend;
    assert((Type == ELF::R_X86_64_32 && (Value <= UINT32_MAX)) ||
           (Type == ELF::R_X86_64_32S &&
            ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
    uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
    support::ulittle32_t::ref(Section.Address + Offset) = TruncatedAddr;
    DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
                 << format("%p\n", Section.Address + Offset));
    break;
  }
  case ELF::R_X86_64_PC32: {
    uint64_t FinalAddress = Section.LoadAddress + Offset;
    int64_t RealOffset = Value + Addend - FinalAddress;
    assert(isInt<32>(RealOffset));
    int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
    support::ulittle32_t::ref(Section.Address + Offset) = TruncOffset;
    break;
  }
  case ELF::R_X86_64_PC64: {
    uint64_t FinalAddress = Section.LoadAddress + Offset;
    int64_t RealOffset = Value + Addend - FinalAddress;
    support::ulittle64_t::ref(Section.Address + Offset) = RealOffset;
    break;
  }
  }
}

void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
                                          uint64_t Offset, uint32_t Value,
                                          uint32_t Type, int32_t Addend) {
  switch (Type) {
  case ELF::R_386_32: {
    support::ulittle32_t::ref(Section.Address + Offset) = Value + Addend;
    break;
  }
  case ELF::R_386_PC32: {
    uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
    uint32_t RealOffset = Value + Addend - FinalAddress;
    support::ulittle32_t::ref(Section.Address + Offset) = RealOffset;
    break;
  }
  default:
    // There are other relocation types, but it appears these are the
    // only ones currently used by the LLVM ELF object writer.
    llvm_unreachable("Relocation type not implemented yet!");
    break;
  }
}

void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
                                              uint64_t Offset, uint64_t Value,
                                              uint32_t Type, int64_t Addend) {
  uint32_t *TargetPtr = reinterpret_cast<uint32_t *>(Section.Address + Offset);
  uint64_t FinalAddress = Section.LoadAddress + Offset;

  DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
               << format("%llx", Section.Address + Offset)
               << " FinalAddress: 0x" << format("%llx", FinalAddress)
               << " Value: 0x" << format("%llx", Value) << " Type: 0x"
               << format("%x", Type) << " Addend: 0x" << format("%llx", Addend)
               << "\n");

  switch (Type) {
  default:
    llvm_unreachable("Relocation type not implemented yet!");
    break;
  case ELF::R_AARCH64_ABS64: {
    uint64_t *TargetPtr =
        reinterpret_cast<uint64_t *>(Section.Address + Offset);
    *TargetPtr = Value + Addend;
    break;
  }
  case ELF::R_AARCH64_PREL32: {
    uint64_t Result = Value + Addend - FinalAddress;
    assert(static_cast<int64_t>(Result) >= INT32_MIN &&
           static_cast<int64_t>(Result) <= UINT32_MAX);
    *TargetPtr = static_cast<uint32_t>(Result & 0xffffffffU);
    break;
  }
  case ELF::R_AARCH64_CALL26: // fallthrough
  case ELF::R_AARCH64_JUMP26: {
    // Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the
    // calculation.
    uint64_t BranchImm = Value + Addend - FinalAddress;

    // "Check that -2^27 <= result < 2^27".
    assert(isInt<28>(BranchImm));

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xfc000000U;
    // Immediate goes in bits 25:0 of B and BL.
    *TargetPtr |= static_cast<uint32_t>(BranchImm & 0xffffffcU) >> 2;
    break;
  }
  case ELF::R_AARCH64_MOVW_UABS_G3: {
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffe0001fU;
    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
    *TargetPtr |= Result >> (48 - 5);
    // Shift must be "lsl #48", in bits 22:21
    assert((*TargetPtr >> 21 & 0x3) == 3 && "invalid shift for relocation");
    break;
  }
  case ELF::R_AARCH64_MOVW_UABS_G2_NC: {
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffe0001fU;
    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
    *TargetPtr |= ((Result & 0xffff00000000ULL) >> (32 - 5));
    // Shift must be "lsl #32", in bits 22:21
    assert((*TargetPtr >> 21 & 0x3) == 2 && "invalid shift for relocation");
    break;
  }
  case ELF::R_AARCH64_MOVW_UABS_G1_NC: {
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffe0001fU;
    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
    *TargetPtr |= ((Result & 0xffff0000U) >> (16 - 5));
    // Shift must be "lsl #16", in bits 22:21
    assert((*TargetPtr >> 21 & 0x3) == 1 && "invalid shift for relocation");
    break;
  }
  case ELF::R_AARCH64_MOVW_UABS_G0_NC: {
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffe0001fU;
    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
    *TargetPtr |= ((Result & 0xffffU) << 5);
    // Shift must be "lsl #0", in bits 22:21.
    assert((*TargetPtr >> 21 & 0x3) == 0 && "invalid shift for relocation");
    break;
  }
  case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
    // Operation: Page(S+A) - Page(P)
    uint64_t Result =
        ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);

    // Check that -2^32 <= X < 2^32
    assert(isInt<33>(Result) && "overflow check failed for relocation");

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0x9f00001fU;
    // Immediate goes in bits 30:29 + 5:23 of ADRP instruction, taken
    // from bits 32:12 of X.
    *TargetPtr |= ((Result & 0x3000U) << (29 - 12));
    *TargetPtr |= ((Result & 0x1ffffc000ULL) >> (14 - 5));
    break;
  }
  case ELF::R_AARCH64_LDST32_ABS_LO12_NC: {
    // Operation: S + A
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffc003ffU;
    // Immediate goes in bits 21:10 of LD/ST instruction, taken
    // from bits 11:2 of X
    *TargetPtr |= ((Result & 0xffc) << (10 - 2));
    break;
  }
  case ELF::R_AARCH64_LDST64_ABS_LO12_NC: {
    // Operation: S + A
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffc003ffU;
    // Immediate goes in bits 21:10 of LD/ST instruction, taken
    // from bits 11:3 of X
    *TargetPtr |= ((Result & 0xff8) << (10 - 3));
    break;
  }
  }
}

void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
                                          uint64_t Offset, uint32_t Value,
                                          uint32_t Type, int32_t Addend) {
  // TODO: Add Thumb relocations.
  uint32_t *TargetPtr = (uint32_t *)(Section.Address + Offset);
  uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
  Value += Addend;

  DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: "
               << Section.Address + Offset
               << " FinalAddress: " << format("%p", FinalAddress) << " Value: "
               << format("%x", Value) << " Type: " << format("%x", Type)
               << " Addend: " << format("%x", Addend) << "\n");

  switch (Type) {
  default:
    llvm_unreachable("Not implemented relocation type!");

  case ELF::R_ARM_NONE:
    break;
  case ELF::R_ARM_PREL31:
  case ELF::R_ARM_TARGET1:
  case ELF::R_ARM_ABS32:
    *TargetPtr = Value;
    break;
  // Write the 16-bit halfword of the 32-bit value into the MOVW/MOVT
  // instruction: the low 12 bits go into imm12 and the top 4 bits are
  // shifted into the imm4 field (bits 19:16).
  case ELF::R_ARM_MOVW_ABS_NC:
  case ELF::R_ARM_MOVT_ABS:
    if (Type == ELF::R_ARM_MOVW_ABS_NC)
      Value = Value & 0xFFFF;
    else if (Type == ELF::R_ARM_MOVT_ABS)
      Value = (Value >> 16) & 0xFFFF;
    *TargetPtr &= ~0x000F0FFF;
    *TargetPtr |= Value & 0xFFF;
    *TargetPtr |= ((Value >> 12) & 0xF) << 16;
    break;
  // Write the 24-bit relative value into the branch instruction.
  case ELF::R_ARM_PC24: // Fall through.
  case ELF::R_ARM_CALL: // Fall through.
  case ELF::R_ARM_JUMP24:
    int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8);
    RelValue = (RelValue & 0x03FFFFFC) >> 2;
    assert((*TargetPtr & 0xFFFFFF) == 0xFFFFFE);
    *TargetPtr &= 0xFF000000;
    *TargetPtr |= RelValue;
    break;
  }
}

void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section,
                                           uint64_t Offset, uint32_t Value,
                                           uint32_t Type, int32_t Addend) {
  uint8_t *TargetPtr = Section.Address + Offset;
  Value += Addend;

  DEBUG(dbgs() << "resolveMIPSRelocation, LocalAddress: "
               << Section.Address + Offset << " FinalAddress: "
               << format("%p", Section.LoadAddress + Offset) << " Value: "
               << format("%x", Value) << " Type: " << format("%x", Type)
               << " Addend: " << format("%x", Addend) << "\n");

  uint32_t Insn = readBytesUnaligned(TargetPtr, 4);

  switch (Type) {
  default:
    llvm_unreachable("Not implemented relocation type!");
    break;
  case ELF::R_MIPS_32:
    writeBytesUnaligned(Value, TargetPtr, 4);
    break;
  case ELF::R_MIPS_26:
    Insn &= 0xfc000000;
    Insn |= (Value & 0x0fffffff) >> 2;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_HI16:
    // Get the higher 16-bits. Also add 1 if bit 15 is 1.
    Insn &= 0xffff0000;
    Insn |= ((Value + 0x8000) >> 16) & 0xffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
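  // A hand-worked sketch of the HI16/LO16 pairing above and below (purely
  // illustrative numbers, not taken from a particular object): suppose Value
  // ends up as 0x00018000. R_MIPS_HI16 stores
  // ((0x00018000 + 0x8000) >> 16) & 0xffff == 0x0002 into the lui, and
  // R_MIPS_LO16 stores 0x8000 into the paired addiu/lw. At run time the
  // hardware sign-extends the low half, so the pair computes
  // (0x0002 << 16) + (int16_t)0x8000 == 0x00020000 - 0x8000 == 0x00018000.
  // The "+ 0x8000" rounding in HI16 is what compensates for that sign
  // extension.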
  case ELF::R_MIPS_LO16:
    Insn &= 0xffff0000;
    Insn |= Value & 0xffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_PC32: {
    uint32_t FinalAddress = (Section.LoadAddress + Offset);
    writeBytesUnaligned(Value - FinalAddress, (uint8_t *)TargetPtr, 4);
    break;
  }
  case ELF::R_MIPS_PC16: {
    uint32_t FinalAddress = (Section.LoadAddress + Offset);
    Insn &= 0xffff0000;
    Insn |= ((Value - FinalAddress) >> 2) & 0xffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  }
  case ELF::R_MIPS_PC19_S2: {
    uint32_t FinalAddress = (Section.LoadAddress + Offset);
    Insn &= 0xfff80000;
    Insn |= ((Value - (FinalAddress & ~0x3)) >> 2) & 0x7ffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  }
  case ELF::R_MIPS_PC21_S2: {
    uint32_t FinalAddress = (Section.LoadAddress + Offset);
    Insn &= 0xffe00000;
    Insn |= ((Value - FinalAddress) >> 2) & 0x1fffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  }
  case ELF::R_MIPS_PC26_S2: {
    uint32_t FinalAddress = (Section.LoadAddress + Offset);
    Insn &= 0xfc000000;
    Insn |= ((Value - FinalAddress) >> 2) & 0x3ffffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  }
  case ELF::R_MIPS_PCHI16: {
    uint32_t FinalAddress = (Section.LoadAddress + Offset);
    Insn &= 0xffff0000;
    Insn |= ((Value - FinalAddress + 0x8000) >> 16) & 0xffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  }
  case ELF::R_MIPS_PCLO16: {
    uint32_t FinalAddress = (Section.LoadAddress + Offset);
    Insn &= 0xffff0000;
    Insn |= (Value - FinalAddress) & 0xffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  }
  }
}

void RuntimeDyldELF::setMipsABI(const ObjectFile &Obj) {
  if (Arch == Triple::UnknownArch ||
      !StringRef(Triple::getArchTypePrefix(Arch)).equals("mips")) {
    IsMipsO32ABI = false;
    IsMipsN64ABI = false;
    return;
  }
  unsigned AbiVariant;
  Obj.getPlatformFlags(AbiVariant);
  IsMipsO32ABI = AbiVariant & ELF::EF_MIPS_ABI_O32;
  IsMipsN64ABI = Obj.getFileFormatName().equals("ELF64-mips");
  if (AbiVariant & ELF::EF_MIPS_ABI2)
    llvm_unreachable("Mips N32 ABI is not supported yet");
}

void RuntimeDyldELF::resolveMIPS64Relocation(const SectionEntry &Section,
                                             uint64_t Offset, uint64_t Value,
                                             uint32_t Type, int64_t Addend,
                                             uint64_t SymOffset,
                                             SID SectionID) {
  uint32_t r_type = Type & 0xff;
  uint32_t r_type2 = (Type >> 8) & 0xff;
  uint32_t r_type3 = (Type >> 16) & 0xff;

  // RelType keeps track of which relocation type is currently being applied.
  uint32_t RelType = r_type;
  int64_t CalculatedValue = evaluateMIPS64Relocation(Section, Offset, Value,
                                                     RelType, Addend,
                                                     SymOffset, SectionID);
  if (r_type2 != ELF::R_MIPS_NONE) {
    RelType = r_type2;
    CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
                                               CalculatedValue, SymOffset,
                                               SectionID);
  }
  if (r_type3 != ELF::R_MIPS_NONE) {
    RelType = r_type3;
    CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
                                               CalculatedValue, SymOffset,
                                               SectionID);
  }
  applyMIPS64Relocation(Section.Address + Offset, CalculatedValue, RelType);
}
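// Rough sketch of how the packed N64 relocation types above compose. The
// concrete triple is an illustrative assumption, not taken from a specific
// object: a GP-setup sequence typically carries something like
// (r_type, r_type2, r_type3) = (R_MIPS_GPREL16, R_MIPS_SUB, R_MIPS_HI16).
// The first evaluation produces the GP-relative offset
//   S + A - (GOTAddr + 0x7ff0),
// the second is fed that result as its addend and negates it (0 - prev), and
// the third turns the negated offset into its %hi half, which
// applyMIPS64Relocation then patches into the low 16 bits of the target
// instruction. Each stage consumes the previous CalculatedValue as its
// Addend with Value forced to zero, matching the calls above.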
int64_t
RuntimeDyldELF::evaluateMIPS64Relocation(const SectionEntry &Section,
                                         uint64_t Offset, uint64_t Value,
                                         uint32_t Type, int64_t Addend,
                                         uint64_t SymOffset, SID SectionID) {

  DEBUG(dbgs() << "evaluateMIPS64Relocation, LocalAddress: 0x"
               << format("%llx", Section.Address + Offset)
               << " FinalAddress: 0x"
               << format("%llx", Section.LoadAddress + Offset)
               << " Value: 0x" << format("%llx", Value) << " Type: 0x"
               << format("%x", Type) << " Addend: 0x" << format("%llx", Addend)
               << " SymOffset: " << format("%x", SymOffset)
               << "\n");

  switch (Type) {
  default:
    llvm_unreachable("Not implemented relocation type!");
    break;
  case ELF::R_MIPS_JALR:
  case ELF::R_MIPS_NONE:
    break;
  case ELF::R_MIPS_32:
  case ELF::R_MIPS_64:
    return Value + Addend;
  case ELF::R_MIPS_26:
    return ((Value + Addend) >> 2) & 0x3ffffff;
  case ELF::R_MIPS_GPREL16: {
    uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
    return Value + Addend - (GOTAddr + 0x7ff0);
  }
  case ELF::R_MIPS_SUB:
    return Value - Addend;
  case ELF::R_MIPS_HI16:
    // Get the higher 16-bits. Also add 1 if bit 15 is 1.
    return ((Value + Addend + 0x8000) >> 16) & 0xffff;
  case ELF::R_MIPS_LO16:
    return (Value + Addend) & 0xffff;
  case ELF::R_MIPS_CALL16:
  case ELF::R_MIPS_GOT_DISP:
  case ELF::R_MIPS_GOT_PAGE: {
    uint8_t *LocalGOTAddr =
        getSectionAddress(SectionToGOTMap[SectionID]) + SymOffset;
    uint64_t GOTEntry = readBytesUnaligned(LocalGOTAddr, 8);

    Value += Addend;
    if (Type == ELF::R_MIPS_GOT_PAGE)
      Value = (Value + 0x8000) & ~0xffff;

    if (GOTEntry)
      assert(GOTEntry == Value &&
             "GOT entry has two different addresses.");
    else
      writeBytesUnaligned(Value, LocalGOTAddr, 8);

    return (SymOffset - 0x7ff0) & 0xffff;
  }
  case ELF::R_MIPS_GOT_OFST: {
    int64_t page = (Value + Addend + 0x8000) & ~0xffff;
    return (Value + Addend - page) & 0xffff;
  }
  case ELF::R_MIPS_GPREL32: {
    uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
    return Value + Addend - (GOTAddr + 0x7ff0);
  }
  case ELF::R_MIPS_PC16: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - FinalAddress) >> 2) & 0xffff;
  }
  case ELF::R_MIPS_PC32: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return Value + Addend - FinalAddress;
  }
  case ELF::R_MIPS_PC18_S3: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - (FinalAddress & ~0x7)) >> 3) & 0x3ffff;
  }
  case ELF::R_MIPS_PC19_S2: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - (FinalAddress & ~0x3)) >> 2) & 0x7ffff;
  }
  case ELF::R_MIPS_PC21_S2: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - FinalAddress) >> 2) & 0x1fffff;
  }
  case ELF::R_MIPS_PC26_S2: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - FinalAddress) >> 2) & 0x3ffffff;
  }
  case ELF::R_MIPS_PCHI16: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - FinalAddress + 0x8000) >> 16) & 0xffff;
  }
  case ELF::R_MIPS_PCLO16: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return (Value + Addend - FinalAddress) & 0xffff;
  }
  }
  return 0;
}

void RuntimeDyldELF::applyMIPS64Relocation(uint8_t *TargetPtr,
                                           int64_t CalculatedValue,
                                           uint32_t Type) {
  uint32_t Insn = readBytesUnaligned(TargetPtr, 4);

  switch (Type) {
  default:
    break;
  case ELF::R_MIPS_32:
  case ELF::R_MIPS_GPREL32:
  case ELF::R_MIPS_PC32:
    writeBytesUnaligned(CalculatedValue & 0xffffffff, TargetPtr, 4);
    break;
  case ELF::R_MIPS_64:
  case ELF::R_MIPS_SUB:
    writeBytesUnaligned(CalculatedValue, TargetPtr, 8);
    break;
  case ELF::R_MIPS_26:
  case ELF::R_MIPS_PC26_S2:
    Insn = (Insn & 0xfc000000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_GPREL16:
    Insn = (Insn & 0xffff0000) | (CalculatedValue & 0xffff);
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_HI16:
  case ELF::R_MIPS_LO16:
  case ELF::R_MIPS_PCHI16:
  case ELF::R_MIPS_PCLO16:
  case ELF::R_MIPS_PC16:
  case ELF::R_MIPS_CALL16:
  case ELF::R_MIPS_GOT_DISP:
  case ELF::R_MIPS_GOT_PAGE:
  case ELF::R_MIPS_GOT_OFST:
    Insn = (Insn & 0xffff0000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_PC18_S3:
    Insn = (Insn & 0xfffc0000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_PC19_S2:
    Insn = (Insn & 0xfff80000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_PC21_S2:
    Insn = (Insn & 0xffe00000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  }
}

// Return the .TOC. section and offset.
void RuntimeDyldELF::findPPC64TOCSection(const ELFObjectFileBase &Obj,
                                         ObjSectionToIDMap &LocalSections,
                                         RelocationValueRef &Rel) {
  // Set a default SectionID in case we do not find a TOC section below.
  // This may happen for references to the TOC base (sym@toc, .opd
  // relocation) without a .toc directive. In this case just use the
  // first section (which is usually the .opd) since the code won't
  // reference the .toc base directly.
  Rel.SymbolName = NULL;
  Rel.SectionID = 0;

  // The TOC consists of sections .got, .toc, .tocbss, .plt in that
  // order. The TOC starts where the first of these sections starts.
  for (auto &Section : Obj.sections()) {
    StringRef SectionName;
    check(Section.getName(SectionName));

    if (SectionName == ".got"
        || SectionName == ".toc"
        || SectionName == ".tocbss"
        || SectionName == ".plt") {
      Rel.SectionID = findOrEmitSection(Obj, Section, false, LocalSections);
      break;
    }
  }

  // Per the ppc64-elf-linux ABI, the TOC base is the TOC value plus 0x8000,
  // thus permitting a full 64 Kbytes segment.
  Rel.Addend = 0x8000;
}

// Returns the section and offset associated with the OPD entry referenced
// by Symbol.
void RuntimeDyldELF::findOPDEntrySection(const ELFObjectFileBase &Obj,
                                         ObjSectionToIDMap &LocalSections,
                                         RelocationValueRef &Rel) {
  // Get the ELF symbol value (st_value) to compare with the relocation
  // offsets in .opd entries.
  for (section_iterator si = Obj.section_begin(), se = Obj.section_end();
       si != se; ++si) {
    section_iterator RelSecI = si->getRelocatedSection();
    if (RelSecI == Obj.section_end())
      continue;

    StringRef RelSectionName;
    check(RelSecI->getName(RelSectionName));
    if (RelSectionName != ".opd")
      continue;

    for (elf_relocation_iterator i = si->relocation_begin(),
                                 e = si->relocation_end();
         i != e;) {
      // The R_PPC64_ADDR64 relocation indicates the first field
      // of a .opd entry.
      uint64_t TypeFunc = i->getType();
      if (TypeFunc != ELF::R_PPC64_ADDR64) {
        ++i;
        continue;
      }

      uint64_t TargetSymbolOffset = i->getOffset();
      symbol_iterator TargetSymbol = i->getSymbol();
      ErrorOr<int64_t> AddendOrErr = i->getAddend();
      check(AddendOrErr.getError());
      int64_t Addend = *AddendOrErr;

      ++i;
      if (i == e)
        break;

      // Just check whether the following relocation is a R_PPC64_TOC.
      uint64_t TypeTOC = i->getType();
      if (TypeTOC != ELF::R_PPC64_TOC)
        continue;

      // Finally compare the symbol value and the target symbol offset
      // to check if this .opd entry refers to the symbol the relocation
      // points to.
      if (Rel.Addend != (int64_t)TargetSymbolOffset)
        continue;

      ErrorOr<section_iterator> TSIOrErr = TargetSymbol->getSection();
      check(TSIOrErr.getError());
      section_iterator tsi = *TSIOrErr;
      bool IsCode = tsi->isText();
      Rel.SectionID = findOrEmitSection(Obj, (*tsi), IsCode, LocalSections);
      Rel.Addend = (intptr_t)Addend;
      return;
    }
  }
  llvm_unreachable("Attempting to get address of OPD entry!");
}

// Relocation masks following the #lo(value), #hi(value), #ha(value),
// #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1 (Relocation Types) of the 64-bit PowerPC
// ELF ABI document.

static inline uint16_t applyPPClo(uint64_t value) { return value & 0xffff; }

static inline uint16_t applyPPChi(uint64_t value) {
  return (value >> 16) & 0xffff;
}

static inline uint16_t applyPPCha(uint64_t value) {
  return ((value + 0x8000) >> 16) & 0xffff;
}

static inline uint16_t applyPPChigher(uint64_t value) {
  return (value >> 32) & 0xffff;
}

static inline uint16_t applyPPChighera(uint64_t value) {
  return ((value + 0x8000) >> 32) & 0xffff;
}

static inline uint16_t applyPPChighest(uint64_t value) {
  return (value >> 48) & 0xffff;
}

static inline uint16_t applyPPChighesta(uint64_t value) {
  return ((value + 0x8000) >> 48) & 0xffff;
}
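// For illustration (a hand-worked sketch, not tied to any particular object):
// splitting value == 0x12348000 gives applyPPChi(value) == 0x1234 but
// applyPPCha(value) == 0x1235, because the low half 0x8000 is sign-extended
// to -0x8000 when the #lo part is added back by an addi/ld; #ha pre-adds
// 0x8000 so that (#ha(value) << 16) + (int16_t)#lo(value) reconstructs the
// original value. #highera and #highesta are the analogous adjusted ("a")
// variants for the upper halves of a 64-bit constant.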
void RuntimeDyldELF::resolvePPC32Relocation(const SectionEntry &Section,
                                            uint64_t Offset, uint64_t Value,
                                            uint32_t Type, int64_t Addend) {
  uint8_t *LocalAddress = Section.Address + Offset;
  switch (Type) {
  default:
    llvm_unreachable("Relocation type not implemented yet!");
    break;
  case ELF::R_PPC_ADDR16_LO:
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
    break;
  case ELF::R_PPC_ADDR16_HI:
    writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
    break;
  case ELF::R_PPC_ADDR16_HA:
    writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
    break;
  }
}

void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
                                            uint64_t Offset, uint64_t Value,
                                            uint32_t Type, int64_t Addend) {
  uint8_t *LocalAddress = Section.Address + Offset;
  switch (Type) {
  default:
    llvm_unreachable("Relocation type not implemented yet!");
    break;
  case ELF::R_PPC64_ADDR16:
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_DS:
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
    break;
  case ELF::R_PPC64_ADDR16_LO:
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_LO_DS:
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
    break;
  case ELF::R_PPC64_ADDR16_HI:
    writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HA:
    writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHER:
    writeInt16BE(LocalAddress, applyPPChigher(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHERA:
    writeInt16BE(LocalAddress, applyPPChighera(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHEST:
    writeInt16BE(LocalAddress, applyPPChighest(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHESTA:
    writeInt16BE(LocalAddress, applyPPChighesta(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR14: {
    assert(((Value + Addend) & 3) == 0);
    // Preserve the AA/LK bits in the branch instruction
    uint8_t aalk = *(LocalAddress + 3);
    writeInt16BE(LocalAddress + 2, (aalk & 3) | ((Value + Addend) & 0xfffc));
  } break;
  case ELF::R_PPC64_REL16_LO: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt16BE(LocalAddress, applyPPClo(Delta));
  } break;
  case ELF::R_PPC64_REL16_HI: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt16BE(LocalAddress, applyPPChi(Delta));
  } break;
  case ELF::R_PPC64_REL16_HA: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt16BE(LocalAddress, applyPPCha(Delta));
  } break;
  case ELF::R_PPC64_ADDR32: {
    int32_t Result = static_cast<int32_t>(Value + Addend);
    if (SignExtend32<32>(Result) != Result)
      llvm_unreachable("Relocation R_PPC64_ADDR32 overflow");
    writeInt32BE(LocalAddress, Result);
  } break;
  case ELF::R_PPC64_REL24: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend);
    if (SignExtend32<24>(delta) != delta)
      llvm_unreachable("Relocation R_PPC64_REL24 overflow");
    // Generates a 'bl <address>' instruction
    writeInt32BE(LocalAddress, 0x48000001 | (delta & 0x03FFFFFC));
  } break;
  case ELF::R_PPC64_REL32: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend);
    if (SignExtend32<32>(delta) != delta)
      llvm_unreachable("Relocation R_PPC64_REL32 overflow");
    writeInt32BE(LocalAddress, delta);
  } break;
  case ELF::R_PPC64_REL64: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt64BE(LocalAddress, Delta);
  } break;
  case ELF::R_PPC64_ADDR64:
    writeInt64BE(LocalAddress, Value + Addend);
    break;
  }
}

void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
                                              uint64_t Offset, uint64_t Value,
                                              uint32_t Type, int64_t Addend) {
  uint8_t *LocalAddress = Section.Address + Offset;
  switch (Type) {
  default:
    llvm_unreachable("Relocation type not implemented yet!");
    break;
  case ELF::R_390_PC16DBL:
  case ELF::R_390_PLT16DBL: {
    int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
    assert(int16_t(Delta / 2) * 2 == Delta && "R_390_PC16DBL overflow");
    writeInt16BE(LocalAddress, Delta / 2);
    break;
  }
  case ELF::R_390_PC32DBL:
  case ELF::R_390_PLT32DBL: {
    int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
    assert(int32_t(Delta / 2) * 2 == Delta && "R_390_PC32DBL overflow");
    writeInt32BE(LocalAddress, Delta / 2);
    break;
  }
  case ELF::R_390_PC32: {
    int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
    assert(int32_t(Delta) == Delta && "R_390_PC32 overflow");
    writeInt32BE(LocalAddress, Delta);
    break;
  }
  case ELF::R_390_64:
    writeInt64BE(LocalAddress, Value + Addend);
    break;
  }
}

// The target location for the relocation is described by RE.SectionID and
// RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
// SectionEntry has three members describing its location.
// SectionEntry::Address is the address at which the section has been loaded
// into memory in the current (host) process. SectionEntry::LoadAddress is the
// address that the section will have in the target process.
// SectionEntry::ObjAddress is the address of the bits for this section in the
// original emitted object image (also in the current address space).
//
// Relocations will be applied as if the section were loaded at
// SectionEntry::LoadAddress, but they will be applied at an address based
// on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer to
// target memory contents if they are required for value calculations.
//
// The Value parameter here is the load address of the symbol for the
// relocation to be applied. For relocations which refer to symbols in the
// current object, Value will be the LoadAddress of the section in which
// the symbol resides (RE.Addend provides additional information about the
// symbol location). For external symbols, Value will be the address of the
// symbol in the target address space.
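//
// As a concrete (hypothetical) illustration of the distinction: if a section
// sits at Address 0x7f0000001000 in this process but will run at LoadAddress
// 0x400000 in the target, then for a PC-relative relocation at Offset 0x10
// against a symbol whose target address (Value + Addend) is 0x401000, the
// value computed and stored is 0x401000 - (0x400000 + 0x10) = 0xff0, while
// the byte patching itself happens at Address + Offset = 0x7f0000001010.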
void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE,
                                       uint64_t Value) {
  const SectionEntry &Section = Sections[RE.SectionID];
  return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
                           RE.SymOffset, RE.SectionID);
}

void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
                                       uint64_t Offset, uint64_t Value,
                                       uint32_t Type, int64_t Addend,
                                       uint64_t SymOffset, SID SectionID) {
  switch (Arch) {
  case Triple::x86_64:
    resolveX86_64Relocation(Section, Offset, Value, Type, Addend, SymOffset);
    break;
  case Triple::x86:
    resolveX86Relocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
                         (uint32_t)(Addend & 0xffffffffL));
    break;
  case Triple::aarch64:
  case Triple::aarch64_be:
    resolveAArch64Relocation(Section, Offset, Value, Type, Addend);
    break;
  case Triple::arm: // Fall through.
  case Triple::armeb:
  case Triple::thumb:
  case Triple::thumbeb:
    resolveARMRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
                         (uint32_t)(Addend & 0xffffffffL));
    break;
  case Triple::mips: // Fall through.
  case Triple::mipsel:
  case Triple::mips64:
  case Triple::mips64el:
    if (IsMipsO32ABI)
      resolveMIPSRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL),
                            Type, (uint32_t)(Addend & 0xffffffffL));
    else if (IsMipsN64ABI)
      resolveMIPS64Relocation(Section, Offset, Value, Type, Addend, SymOffset,
                              SectionID);
    else
      llvm_unreachable("Mips ABI not handled");
    break;
  case Triple::ppc:
    resolvePPC32Relocation(Section, Offset, Value, Type, Addend);
    break;
  case Triple::ppc64: // Fall through.
  case Triple::ppc64le:
    resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
    break;
  case Triple::systemz:
    resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
    break;
  default:
    llvm_unreachable("Unsupported CPU type!");
  }
}

void *RuntimeDyldELF::computePlaceholderAddress(unsigned SectionID,
                                                uint64_t Offset) const {
  return (void *)(Sections[SectionID].ObjAddress + Offset);
}

void RuntimeDyldELF::processSimpleRelocation(unsigned SectionID,
                                             uint64_t Offset, unsigned RelType,
                                             RelocationValueRef Value) {
  RelocationEntry RE(SectionID, Offset, RelType, Value.Addend, Value.Offset);
  if (Value.SymbolName)
    addRelocationForSymbol(RE, Value.SymbolName);
  else
    addRelocationForSection(RE, Value.SectionID);
}

uint32_t RuntimeDyldELF::getMatchingLoRelocation(uint32_t RelType,
                                                 bool IsLocal) const {
  switch (RelType) {
  case ELF::R_MICROMIPS_GOT16:
    if (IsLocal)
      return ELF::R_MICROMIPS_LO16;
    break;
  case ELF::R_MICROMIPS_HI16:
    return ELF::R_MICROMIPS_LO16;
  case ELF::R_MIPS_GOT16:
    if (IsLocal)
      return ELF::R_MIPS_LO16;
    break;
  case ELF::R_MIPS_HI16:
    return ELF::R_MIPS_LO16;
  case ELF::R_MIPS_PCHI16:
    return ELF::R_MIPS_PCLO16;
  default:
    break;
  }
  return ELF::R_MIPS_NONE;
}

relocation_iterator RuntimeDyldELF::processRelocationRef(
    unsigned SectionID, relocation_iterator RelI, const ObjectFile &O,
    ObjSectionToIDMap &ObjSectionToID, StubMap &Stubs) {
  const auto &Obj = cast<ELFObjectFileBase>(O);
  uint64_t RelType = RelI->getType();
  ErrorOr<int64_t> AddendOrErr = ELFRelocationRef(*RelI).getAddend();
  int64_t Addend = AddendOrErr ? *AddendOrErr : 0;
  elf_symbol_iterator Symbol = RelI->getSymbol();

  // Obtain the symbol name which is referenced in the relocation
  StringRef TargetName;
  if (Symbol != Obj.symbol_end()) {
    ErrorOr<StringRef> TargetNameOrErr = Symbol->getName();
    if (std::error_code EC = TargetNameOrErr.getError())
      report_fatal_error(EC.message());
    TargetName = *TargetNameOrErr;
  }
  DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
               << " TargetName: " << TargetName << "\n");
  RelocationValueRef Value;
  // First search for the symbol in the local symbol table
  SymbolRef::Type SymType = SymbolRef::ST_Unknown;

  // Search for the symbol in the global symbol table
  RTDyldSymbolTable::const_iterator gsi = GlobalSymbolTable.end();
  if (Symbol != Obj.symbol_end()) {
    gsi = GlobalSymbolTable.find(TargetName.data());
    SymType = Symbol->getType();
  }
  if (gsi != GlobalSymbolTable.end()) {
    const auto &SymInfo = gsi->second;
    Value.SectionID = SymInfo.getSectionID();
    Value.Offset = SymInfo.getOffset();
    Value.Addend = SymInfo.getOffset() + Addend;
  } else {
    switch (SymType) {
    case SymbolRef::ST_Debug: {
      // TODO: At present ELF SymbolRef::ST_Debug corresponds to STT_SECTION.
      // That mapping is not obvious and could be changed by other developers;
      // the cleanest fix would be to add a dedicated ST_Section symbol type
      // to SymbolRef and use it here.
      section_iterator si = *Symbol->getSection();
      if (si == Obj.section_end())
        llvm_unreachable("Symbol section not found, bad object file format!");
      DEBUG(dbgs() << "\t\tThis is a section symbol\n");
      bool isCode = si->isText();
      Value.SectionID = findOrEmitSection(Obj, (*si), isCode, ObjSectionToID);
      Value.Addend = Addend;
      break;
    }
    case SymbolRef::ST_Data:
    case SymbolRef::ST_Unknown: {
      Value.SymbolName = TargetName.data();
      Value.Addend = Addend;

      // Absolute relocations will have a zero symbol ID (STN_UNDEF), which
      // will manifest here as a NULL symbol name.
      // We can set this as a valid (but empty) symbol name, and rely
      // on addRelocationForSymbol to handle this.
      if (!Value.SymbolName)
        Value.SymbolName = "";
      break;
    }
    default:
      llvm_unreachable("Unresolved symbol type!");
      break;
    }
  }

  uint64_t Offset = RelI->getOffset();

  DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset
               << "\n");
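  // A rough sketch of the stub strategy used for the AArch64 branch case
  // below (the exact stub encoding comes from createStubFunction and is not
  // spelled out here): B/BL only reaches +/-128 MiB, so a branch whose target
  // may be out of range is redirected to a per-section stub, and the four
  // MOVW_UABS_G3/G2_NC/G1_NC/G0_NC relocations recorded against the stub fill
  // in the full 64-bit target address 16 bits at a time for a mov/movk-style
  // sequence that then transfers control to the real target.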
  if ((Arch == Triple::aarch64 || Arch == Triple::aarch64_be) &&
      (RelType == ELF::R_AARCH64_CALL26 || RelType == ELF::R_AARCH64_JUMP26)) {
    // This is an AArch64 branch relocation, need to use a stub function.
    DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
    SectionEntry &Section = Sections[SectionID];

    // Look for an existing stub.
    StubMap::const_iterator i = Stubs.find(Value);
    if (i != Stubs.end()) {
      resolveRelocation(Section, Offset, (uint64_t)Section.Address + i->second,
                        RelType, 0);
      DEBUG(dbgs() << " Stub function found\n");
    } else {
      // Create a new stub function.
      DEBUG(dbgs() << " Create a new stub function\n");
      Stubs[Value] = Section.StubOffset;
      uint8_t *StubTargetAddr =
          createStubFunction(Section.Address + Section.StubOffset);

      RelocationEntry REmovz_g3(SectionID, StubTargetAddr - Section.Address,
                                ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend);
      RelocationEntry REmovk_g2(SectionID,
                                StubTargetAddr - Section.Address + 4,
                                ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend);
      RelocationEntry REmovk_g1(SectionID,
                                StubTargetAddr - Section.Address + 8,
                                ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend);
      RelocationEntry REmovk_g0(SectionID,
                                StubTargetAddr - Section.Address + 12,
                                ELF::R_AARCH64_MOVW_UABS_G0_NC, Value.Addend);

      if (Value.SymbolName) {
        addRelocationForSymbol(REmovz_g3, Value.SymbolName);
        addRelocationForSymbol(REmovk_g2, Value.SymbolName);
        addRelocationForSymbol(REmovk_g1, Value.SymbolName);
        addRelocationForSymbol(REmovk_g0, Value.SymbolName);
      } else {
        addRelocationForSection(REmovz_g3, Value.SectionID);
        addRelocationForSection(REmovk_g2, Value.SectionID);
        addRelocationForSection(REmovk_g1, Value.SectionID);
        addRelocationForSection(REmovk_g0, Value.SectionID);
      }
      resolveRelocation(Section, Offset,
                        (uint64_t)Section.Address + Section.StubOffset, RelType,
                        0);
      Section.StubOffset += getMaxStubSize();
    }
  } else if (Arch == Triple::arm) {
    if (RelType == ELF::R_ARM_PC24 || RelType == ELF::R_ARM_CALL ||
        RelType == ELF::R_ARM_JUMP24) {
      // This is an ARM branch relocation, need to use a stub function.
      DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.");
      SectionEntry &Section = Sections[SectionID];

      // Look for an existing stub.
      StubMap::const_iterator i = Stubs.find(Value);
      if (i != Stubs.end()) {
        resolveRelocation(Section, Offset,
                          (uint64_t)Section.Address + i->second, RelType, 0);
        DEBUG(dbgs() << " Stub function found\n");
      } else {
        // Create a new stub function.
        DEBUG(dbgs() << " Create a new stub function\n");
        Stubs[Value] = Section.StubOffset;
        uint8_t *StubTargetAddr =
            createStubFunction(Section.Address + Section.StubOffset);
        RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
                           ELF::R_ARM_ABS32, Value.Addend);
        if (Value.SymbolName)
          addRelocationForSymbol(RE, Value.SymbolName);
        else
          addRelocationForSection(RE, Value.SectionID);

        resolveRelocation(Section, Offset,
                          (uint64_t)Section.Address + Section.StubOffset,
                          RelType, 0);
        Section.StubOffset += getMaxStubSize();
      }
    } else {
      uint32_t *Placeholder = reinterpret_cast<uint32_t *>(
          computePlaceholderAddress(SectionID, Offset));
      if (RelType == ELF::R_ARM_PREL31 || RelType == ELF::R_ARM_TARGET1 ||
          RelType == ELF::R_ARM_ABS32) {
        Value.Addend += *Placeholder;
      } else if (RelType == ELF::R_ARM_MOVW_ABS_NC ||
                 RelType == ELF::R_ARM_MOVT_ABS) {
        // See ELF for ARM documentation
        Value.Addend += (int16_t)((*Placeholder & 0xFFF) |
                                  (((*Placeholder >> 16) & 0xF) << 12));
      }
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    }
  } else if (IsMipsO32ABI) {
    uint8_t *Placeholder = reinterpret_cast<uint8_t *>(
        computePlaceholderAddress(SectionID, Offset));
    uint32_t Opcode = readBytesUnaligned(Placeholder, 4);
    if (RelType == ELF::R_MIPS_26) {
      // This is a Mips branch relocation, need to use a stub function.
      DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
      SectionEntry &Section = Sections[SectionID];

      // Extract the addend from the instruction.
      // We shift up by two since the Value will be down shifted again
      // when applying the relocation.
      uint32_t Addend = (Opcode & 0x03ffffff) << 2;

      Value.Addend += Addend;

      // Look for an existing stub.
      StubMap::const_iterator i = Stubs.find(Value);
      if (i != Stubs.end()) {
        RelocationEntry RE(SectionID, Offset, RelType, i->second);
        addRelocationForSection(RE, SectionID);
        DEBUG(dbgs() << " Stub function found\n");
      } else {
        // Create a new stub function.
        DEBUG(dbgs() << " Create a new stub function\n");
        Stubs[Value] = Section.StubOffset;
        uint8_t *StubTargetAddr =
            createStubFunction(Section.Address + Section.StubOffset);

        // Creating Hi and Lo relocations for the filled stub instructions.
        RelocationEntry REHi(SectionID, StubTargetAddr - Section.Address,
                             ELF::R_MIPS_HI16, Value.Addend);
        RelocationEntry RELo(SectionID, StubTargetAddr - Section.Address + 4,
                             ELF::R_MIPS_LO16, Value.Addend);

        if (Value.SymbolName) {
          addRelocationForSymbol(REHi, Value.SymbolName);
          addRelocationForSymbol(RELo, Value.SymbolName);
        } else {
          addRelocationForSection(REHi, Value.SectionID);
          addRelocationForSection(RELo, Value.SectionID);
        }

        RelocationEntry RE(SectionID, Offset, RelType, Section.StubOffset);
        addRelocationForSection(RE, SectionID);
        Section.StubOffset += getMaxStubSize();
      }
    } else if (RelType == ELF::R_MIPS_HI16 || RelType == ELF::R_MIPS_PCHI16) {
      int64_t Addend = (Opcode & 0x0000ffff) << 16;
      RelocationEntry RE(SectionID, Offset, RelType, Addend);
      PendingRelocs.push_back(std::make_pair(Value, RE));
    } else if (RelType == ELF::R_MIPS_LO16 || RelType == ELF::R_MIPS_PCLO16) {
      int64_t Addend = Value.Addend + SignExtend32<16>(Opcode & 0x0000ffff);
      for (auto I = PendingRelocs.begin(); I != PendingRelocs.end();) {
        const RelocationValueRef &MatchingValue = I->first;
        RelocationEntry &Reloc = I->second;
        if (MatchingValue == Value &&
            RelType == getMatchingLoRelocation(Reloc.RelType) &&
            SectionID == Reloc.SectionID) {
          Reloc.Addend += Addend;
          if (Value.SymbolName)
            addRelocationForSymbol(Reloc, Value.SymbolName);
          else
            addRelocationForSection(Reloc, Value.SectionID);
          I = PendingRelocs.erase(I);
        } else
          ++I;
      }
      RelocationEntry RE(SectionID, Offset, RelType, Addend);
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    } else {
      if (RelType == ELF::R_MIPS_32)
        Value.Addend += Opcode;
      else if (RelType == ELF::R_MIPS_PC16)
        Value.Addend += SignExtend32<18>((Opcode & 0x0000ffff) << 2);
      else if (RelType == ELF::R_MIPS_PC19_S2)
        Value.Addend += SignExtend32<21>((Opcode & 0x0007ffff) << 2);
      else if (RelType == ELF::R_MIPS_PC21_S2)
        Value.Addend += SignExtend32<23>((Opcode & 0x001fffff) << 2);
      else if (RelType == ELF::R_MIPS_PC26_S2)
        Value.Addend += SignExtend32<28>((Opcode & 0x03ffffff) << 2);
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    }
  } else if (IsMipsN64ABI) {
    uint32_t r_type = RelType & 0xff;
    RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
    if (r_type == ELF::R_MIPS_CALL16 || r_type == ELF::R_MIPS_GOT_PAGE
        || r_type == ELF::R_MIPS_GOT_DISP) {
      StringMap<uint64_t>::iterator i = GOTSymbolOffsets.find(TargetName);
      if (i != GOTSymbolOffsets.end())
        RE.SymOffset = i->second;
      else {
        RE.SymOffset = allocateGOTEntries(SectionID, 1);
        GOTSymbolOffsets[TargetName] = RE.SymOffset;
      }
    }
    if (Value.SymbolName)
      addRelocationForSymbol(RE, Value.SymbolName);
    else
      addRelocationForSection(RE, Value.SectionID);
  } else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
    if (RelType == ELF::R_PPC64_REL24) {
      // Determine ABI variant in use for this object.
      unsigned AbiVariant;
      Obj.getPlatformFlags(AbiVariant);
      AbiVariant &= ELF::EF_PPC64_ABI;
      // A PPC branch relocation will need a stub function if the target is
      // an external symbol (SymbolRef::ST_Unknown) or if the target address
      // is not within range of a signed 24-bit branch.
      SectionEntry &Section = Sections[SectionID];
      uint8_t *Target = Section.Address + Offset;
      bool RangeOverflow = false;
      if (SymType != SymbolRef::ST_Unknown) {
        if (AbiVariant != 2) {
          // In the ELFv1 ABI, a function call may point to the .opd entry,
          // so the final symbol value is calculated based on the relocation
          // values in the .opd section.
          findOPDEntrySection(Obj, ObjSectionToID, Value);
        } else {
          // In the ELFv2 ABI, a function symbol may provide a local entry
          // point, which must be used for direct calls.
          uint8_t SymOther = Symbol->getOther();
          Value.Addend += ELF::decodePPC64LocalEntryOffset(SymOther);
        }
        uint8_t *RelocTarget = Sections[Value.SectionID].Address + Value.Addend;
        int32_t delta = static_cast<int32_t>(Target - RelocTarget);
        // If the target is within the 24-bit branch range, just set the
        // branch target directly.
        if (SignExtend32<24>(delta) == delta) {
          RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
          if (Value.SymbolName)
            addRelocationForSymbol(RE, Value.SymbolName);
          else
            addRelocationForSection(RE, Value.SectionID);
        } else {
          RangeOverflow = true;
        }
      }
      if (SymType == SymbolRef::ST_Unknown || RangeOverflow) {
        // The target is either an external symbol (SymbolRef::ST_Unknown) or
        // out of the signed 24-bit branch range, so a stub is needed.
        StubMap::const_iterator i = Stubs.find(Value);
        if (i != Stubs.end()) {
          // Symbol function stub already created, just relocate to it
          resolveRelocation(Section, Offset,
                            (uint64_t)Section.Address + i->second, RelType, 0);
          DEBUG(dbgs() << " Stub function found\n");
        } else {
          // Create a new stub function.
          DEBUG(dbgs() << " Create a new stub function\n");
          Stubs[Value] = Section.StubOffset;
          uint8_t *StubTargetAddr =
              createStubFunction(Section.Address + Section.StubOffset,
                                 AbiVariant);
          RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
                             ELF::R_PPC64_ADDR64, Value.Addend);

          // Generate the 64-bit address loads as exemplified in section
          // 4.5.1 of the PPC64 ELF ABI. Note that the relocations need to
          // apply to the low part of the instructions, so we have to update
          // the offset according to the target endianness.
          uint64_t StubRelocOffset = StubTargetAddr - Section.Address;
          if (!IsTargetLittleEndian)
            StubRelocOffset += 2;

          RelocationEntry REhst(SectionID, StubRelocOffset + 0,
                                ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
          RelocationEntry REhr(SectionID, StubRelocOffset + 4,
                               ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
          RelocationEntry REh(SectionID, StubRelocOffset + 12,
                              ELF::R_PPC64_ADDR16_HI, Value.Addend);
          RelocationEntry REl(SectionID, StubRelocOffset + 16,
                              ELF::R_PPC64_ADDR16_LO, Value.Addend);

          if (Value.SymbolName) {
            addRelocationForSymbol(REhst, Value.SymbolName);
            addRelocationForSymbol(REhr, Value.SymbolName);
            addRelocationForSymbol(REh, Value.SymbolName);
            addRelocationForSymbol(REl, Value.SymbolName);
          } else {
            addRelocationForSection(REhst, Value.SectionID);
            addRelocationForSection(REhr, Value.SectionID);
            addRelocationForSection(REh, Value.SectionID);
            addRelocationForSection(REl, Value.SectionID);
          }

          resolveRelocation(Section, Offset,
                            (uint64_t)Section.Address + Section.StubOffset,
                            RelType, 0);
          Section.StubOffset += getMaxStubSize();
        }
        if (SymType == SymbolRef::ST_Unknown) {
          // Restore the TOC for external calls
          if (AbiVariant == 2)
            writeInt32BE(Target + 4, 0xE8410018); // ld r2,24(r1)
          else
            writeInt32BE(Target + 4, 0xE8410028); // ld r2,40(r1)
        }
      }
    } else if (RelType == ELF::R_PPC64_TOC16 ||
               RelType == ELF::R_PPC64_TOC16_DS ||
               RelType == ELF::R_PPC64_TOC16_LO ||
               RelType == ELF::R_PPC64_TOC16_LO_DS ||
               RelType == ELF::R_PPC64_TOC16_HI ||
               RelType == ELF::R_PPC64_TOC16_HA) {
      // These relocations are supposed to subtract the TOC address from
      // the final value. This does not fit cleanly into the RuntimeDyld
      // scheme, since there may be *two* sections involved in determining
      // the relocation value (the section of the symbol referred to by the
      // relocation, and the TOC section associated with the current module).
      //
      // Fortunately, these relocations are currently only ever generated
      // referring to symbols that themselves reside in the TOC, which means
      // that the two sections are actually the same. Thus they cancel out
      // and we can immediately resolve the relocation right now.
      switch (RelType) {
      case ELF::R_PPC64_TOC16: RelType = ELF::R_PPC64_ADDR16; break;
      case ELF::R_PPC64_TOC16_DS: RelType = ELF::R_PPC64_ADDR16_DS; break;
      case ELF::R_PPC64_TOC16_LO: RelType = ELF::R_PPC64_ADDR16_LO; break;
      case ELF::R_PPC64_TOC16_LO_DS: RelType = ELF::R_PPC64_ADDR16_LO_DS; break;
      case ELF::R_PPC64_TOC16_HI: RelType = ELF::R_PPC64_ADDR16_HI; break;
      case ELF::R_PPC64_TOC16_HA: RelType = ELF::R_PPC64_ADDR16_HA; break;
      default: llvm_unreachable("Wrong relocation type.");
      }

      RelocationValueRef TOCValue;
      findPPC64TOCSection(Obj, ObjSectionToID, TOCValue);
      if (Value.SymbolName || Value.SectionID != TOCValue.SectionID)
        llvm_unreachable("Unsupported TOC relocation.");
      Value.Addend -= TOCValue.Addend;
      resolveRelocation(Sections[SectionID], Offset, Value.Addend, RelType, 0);
    } else {
      // There are two ways to refer to the TOC address directly: either
      // via a ELF::R_PPC64_TOC relocation (where both symbol and addend are
      // ignored), or via any relocation that refers to the magic ".TOC."
      // symbols (in which case the addend is respected).
      if (RelType == ELF::R_PPC64_TOC) {
        RelType = ELF::R_PPC64_ADDR64;
        findPPC64TOCSection(Obj, ObjSectionToID, Value);
      } else if (TargetName == ".TOC.") {
        findPPC64TOCSection(Obj, ObjSectionToID, Value);
        Value.Addend += Addend;
      }

      RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);

      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    }
  } else if (Arch == Triple::systemz &&
             (RelType == ELF::R_390_PLT32DBL || RelType == ELF::R_390_GOTENT)) {
    // Create function stubs for both PLT and GOT references, regardless of
    // whether the GOT reference is to data or code.  The stub contains the
    // full address of the symbol, as needed by GOT references, and the
    // executable part only adds an overhead of 8 bytes.
    //
    // We could try to conserve space by allocating the code and data
    // parts of the stub separately.  However, as things stand, we allocate
    // a stub for every relocation, so using a GOT in JIT code should be
    // no less space efficient than using an explicit constant pool.
    DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
    SectionEntry &Section = Sections[SectionID];

    // Look for an existing stub.
    StubMap::const_iterator i = Stubs.find(Value);
    uintptr_t StubAddress;
    if (i != Stubs.end()) {
      StubAddress = uintptr_t(Section.Address) + i->second;
      DEBUG(dbgs() << " Stub function found\n");
    } else {
      // Create a new stub function.
      DEBUG(dbgs() << " Create a new stub function\n");

      uintptr_t BaseAddress = uintptr_t(Section.Address);
      uintptr_t StubAlignment = getStubAlignment();
      StubAddress = (BaseAddress + Section.StubOffset + StubAlignment - 1) &
                    -StubAlignment;
      unsigned StubOffset = StubAddress - BaseAddress;

      Stubs[Value] = StubOffset;
      createStubFunction((uint8_t *)StubAddress);
      RelocationEntry RE(SectionID, StubOffset + 8, ELF::R_390_64,
                         Value.Offset);
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
      Section.StubOffset = StubOffset + getMaxStubSize();
    }

    if (RelType == ELF::R_390_GOTENT)
      resolveRelocation(Section, Offset, StubAddress + 8, ELF::R_390_PC32DBL,
                        Addend);
    else
      resolveRelocation(Section, Offset, StubAddress, RelType, Addend);
  } else if (Arch == Triple::x86_64) {
    if (RelType == ELF::R_X86_64_PLT32) {
      // The way the PLT relocations normally work is that the linker
      // allocates the PLT and this relocation makes a PC-relative call into
      // the PLT.  The PLT entry will then jump to an address provided by the
      // GOT.  On first call, the GOT address will point back into PLT code
      // that resolves the symbol.  After the first call, the GOT entry points
      // to the actual function.
      //
      // For local functions we're ignoring all of that here and just
      // replacing the PLT32 relocation type with PC32, which will translate
      // the relocation into a PC-relative call directly to the function.  For
      // external symbols we can't be sure the function will be within 2^32
      // bytes of the call site, so we need to create a stub, which calls into
      // the GOT.  This case is equivalent to the usual PLT implementation
      // except that we use the stub mechanism in RuntimeDyld (which puts
      // stubs at the end of the section) rather than allocating a PLT
      // section.
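      //
      // Stub layout (assumed, based on createStubFunction for x86-64): the
      // stub is a 6-byte "jmp *disp32(%rip)" (FF 25 <disp32>), so the 32-bit
      // displacement to patch starts at StubOffset + 2, and because RIP
      // points past the end of that instruction, the GOT-relative value is
      // registered below with a -4 adjustment.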
      if (Value.SymbolName) {
        // This is a call to an external function.
        // Look for an existing stub.
        SectionEntry &Section = Sections[SectionID];
        StubMap::const_iterator i = Stubs.find(Value);
        uintptr_t StubAddress;
        if (i != Stubs.end()) {
          StubAddress = uintptr_t(Section.Address) + i->second;
          DEBUG(dbgs() << " Stub function found\n");
        } else {
          // Create a new stub function (equivalent to a PLT entry).
          DEBUG(dbgs() << " Create a new stub function\n");

          uintptr_t BaseAddress = uintptr_t(Section.Address);
          uintptr_t StubAlignment = getStubAlignment();
          StubAddress = (BaseAddress + Section.StubOffset + StubAlignment - 1) &
                        -StubAlignment;
          unsigned StubOffset = StubAddress - BaseAddress;
          Stubs[Value] = StubOffset;
          createStubFunction((uint8_t *)StubAddress);

          // Bump our stub offset counter.
          Section.StubOffset = StubOffset + getMaxStubSize();

          // Allocate a GOT entry.
          uint64_t GOTOffset = allocateGOTEntries(SectionID, 1);

          // The load of the GOT address has an addend of -4.
          resolveGOTOffsetRelocation(SectionID, StubOffset + 2, GOTOffset - 4);

          // Fill in the value of the symbol we're targeting into the GOT.
          addRelocationForSymbol(
              computeGOTOffsetRE(SectionID, GOTOffset, 0, ELF::R_X86_64_64),
              Value.SymbolName);
        }

        // Make the target call a call into the stub table.
        resolveRelocation(Section, Offset, StubAddress, ELF::R_X86_64_PC32,
                          Addend);
      } else {
        RelocationEntry RE(SectionID, Offset, ELF::R_X86_64_PC32, Value.Addend,
                           Value.Offset);
        addRelocationForSection(RE, Value.SectionID);
      }
    } else if (RelType == ELF::R_X86_64_GOTPCREL) {
      uint64_t GOTOffset = allocateGOTEntries(SectionID, 1);
      resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend);

      // Fill in the value of the symbol we're targeting into the GOT.
      RelocationEntry RE = computeGOTOffsetRE(SectionID, GOTOffset,
                                              Value.Offset, ELF::R_X86_64_64);
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    } else if (RelType == ELF::R_X86_64_PC32) {
      Value.Addend += support::ulittle32_t::ref(
          computePlaceholderAddress(SectionID, Offset));
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    } else if (RelType == ELF::R_X86_64_PC64) {
      Value.Addend += support::ulittle64_t::ref(
          computePlaceholderAddress(SectionID, Offset));
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    } else {
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    }
  } else {
    if (Arch == Triple::x86) {
      Value.Addend += support::ulittle32_t::ref(
          computePlaceholderAddress(SectionID, Offset));
    }
    processSimpleRelocation(SectionID, Offset, RelType, Value);
  }
  return ++RelI;
}

size_t RuntimeDyldELF::getGOTEntrySize() {
  // We don't use the GOT in all of these cases, but it's essentially free
  // to put them all here.
  size_t Result = 0;
  switch (Arch) {
  case Triple::x86_64:
  case Triple::aarch64:
  case Triple::aarch64_be:
  case Triple::ppc64:
  case Triple::ppc64le:
  case Triple::systemz:
    Result = sizeof(uint64_t);
    break;
  case Triple::x86:
  case Triple::arm:
  case Triple::thumb:
    Result = sizeof(uint32_t);
    break;
  case Triple::mips:
  case Triple::mipsel:
  case Triple::mips64:
  case Triple::mips64el:
    if (IsMipsO32ABI)
      Result = sizeof(uint32_t);
    else if (IsMipsN64ABI)
      Result = sizeof(uint64_t);
    else
      llvm_unreachable("Mips ABI not handled");
    break;
  default:
    llvm_unreachable("Unsupported CPU type!");
  }
  return Result;
}

uint64_t RuntimeDyldELF::allocateGOTEntries(unsigned SectionID, unsigned no) {
  (void)SectionID; // The GOT section is the same for all sections in the
                   // object file.
  if (GOTSectionID == 0) {
    GOTSectionID = Sections.size();
    // Reserve a section id.  We'll allocate the section later
    // once we know the total size.
    Sections.push_back(SectionEntry(".got", 0, 0, 0));
  }
  uint64_t StartOffset = CurrentGOTIndex * getGOTEntrySize();
  CurrentGOTIndex += no;
  return StartOffset;
}

void RuntimeDyldELF::resolveGOTOffsetRelocation(unsigned SectionID,
                                                uint64_t Offset,
                                                uint64_t GOTOffset) {
  // Fill in the relative address of the GOT entry into the stub.
  RelocationEntry GOTRE(SectionID, Offset, ELF::R_X86_64_PC32, GOTOffset);
  addRelocationForSection(GOTRE, GOTSectionID);
}

RelocationEntry RuntimeDyldELF::computeGOTOffsetRE(unsigned SectionID,
                                                   uint64_t GOTOffset,
                                                   uint64_t SymbolOffset,
                                                   uint32_t Type) {
  (void)SectionID; // The GOT section is the same for all sections in the
                   // object file.
  return RelocationEntry(GOTSectionID, GOTOffset, Type, SymbolOffset);
}

void RuntimeDyldELF::finalizeLoad(const ObjectFile &Obj,
                                  ObjSectionToIDMap &SectionMap) {
  if (IsMipsO32ABI)
    if (!PendingRelocs.empty())
      report_fatal_error("Can't find matching LO16 reloc");

  // If necessary, allocate the global offset table.
  if (GOTSectionID != 0) {
    // Allocate memory for the section.
    size_t TotalSize = CurrentGOTIndex * getGOTEntrySize();
    uint8_t *Addr = MemMgr.allocateDataSection(TotalSize, getGOTEntrySize(),
                                               GOTSectionID, ".got", false);
    if (!Addr)
      report_fatal_error("Unable to allocate memory for GOT!");

    Sections[GOTSectionID] = SectionEntry(".got", Addr, TotalSize, 0);

    if (Checker)
      Checker->registerSection(Obj.getFileName(), GOTSectionID);

    // For now, initialize all GOT entries to zero.  We'll fill them in as
    // needed when GOT-based relocations are applied.
    memset(Addr, 0, TotalSize);
    if (IsMipsN64ABI) {
      // To correctly resolve Mips GOT relocations, we need a mapping from
      // the object's sections to GOTs.
      for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
           SI != SE; ++SI) {
        if (SI->relocation_begin() != SI->relocation_end()) {
          section_iterator RelocatedSection = SI->getRelocatedSection();
          ObjSectionToIDMap::iterator i = SectionMap.find(*RelocatedSection);
          assert(i != SectionMap.end());
          SectionToGOTMap[i->second] = GOTSectionID;
        }
      }
      GOTSymbolOffsets.clear();
    }
  }

  // Look for and record the EH frame section.
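  // Only the first section named ".eh_frame" is recorded; it is queued in
  // UnregisteredEHFrameSections to be registered later.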
  ObjSectionToIDMap::iterator i, e;
  for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
    const SectionRef &Section = i->first;
    StringRef Name;
    Section.getName(Name);
    if (Name == ".eh_frame") {
      UnregisteredEHFrameSections.push_back(i->second);
      break;
    }
  }

  GOTSectionID = 0;
  CurrentGOTIndex = 0;
}

bool RuntimeDyldELF::isCompatibleFile(const object::ObjectFile &Obj) const {
  return Obj.isELF();
}

} // namespace llvm