//===-- RuntimeDyldELF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementation of ELF support for the MC-JIT runtime dynamic linker.
//
//===----------------------------------------------------------------------===//

#include "RuntimeDyldELF.h"
#include "RuntimeDyldCheckerImpl.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;
using namespace llvm::object;

#define DEBUG_TYPE "dyld"

static inline std::error_code check(std::error_code Err) {
  if (Err) {
    report_fatal_error(Err.message());
  }
  return Err;
}

namespace {

template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> {
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)

  typedef Elf_Shdr_Impl<ELFT> Elf_Shdr;
  typedef Elf_Sym_Impl<ELFT> Elf_Sym;
  typedef Elf_Rel_Impl<ELFT, false> Elf_Rel;
  typedef Elf_Rel_Impl<ELFT, true> Elf_Rela;

  typedef Elf_Ehdr_Impl<ELFT> Elf_Ehdr;

  typedef typename ELFDataTypeTypedefHelper<ELFT>::value_type addr_type;

public:
  DyldELFObject(MemoryBufferRef Wrapper, std::error_code &ec);

  void updateSectionAddress(const SectionRef &Sec, uint64_t Addr);

  void updateSymbolAddress(const SymbolRef &SymRef, uint64_t Addr);

  // Methods for type inquiry through isa, cast and dyn_cast
  static inline bool classof(const Binary *v) {
    return (isa<ELFObjectFile<ELFT>>(v) &&
            classof(cast<ELFObjectFile<ELFT>>(v)));
  }
  static inline bool classof(const ELFObjectFile<ELFT> *v) {
    return v->isDyldType();
  }
};


// The MemoryBuffer passed into this constructor is just a wrapper around the
// actual memory. Ultimately, the Binary parent class will take ownership of
// this MemoryBuffer object but not the underlying memory.
template <class ELFT>
DyldELFObject<ELFT>::DyldELFObject(MemoryBufferRef Wrapper, std::error_code &EC)
    : ELFObjectFile<ELFT>(Wrapper, EC) {
  this->isDyldELFObject = true;
}

template <class ELFT>
void DyldELFObject<ELFT>::updateSectionAddress(const SectionRef &Sec,
                                               uint64_t Addr) {
  DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
  Elf_Shdr *shdr =
      const_cast<Elf_Shdr *>(reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));

  // This assumes the address passed in matches the target address bitness
  // The template-based type cast handles everything else.
  shdr->sh_addr = static_cast<addr_type>(Addr);
}

template <class ELFT>
void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef,
                                              uint64_t Addr) {

  Elf_Sym *sym = const_cast<Elf_Sym *>(
      ELFObjectFile<ELFT>::getSymbol(SymRef.getRawDataRefImpl()));

  // This assumes the address passed in matches the target address bitness
  // The template-based type cast handles everything else.
  sym->st_value = static_cast<addr_type>(Addr);
}

class LoadedELFObjectInfo final
    : public RuntimeDyld::LoadedObjectInfoHelper<LoadedELFObjectInfo> {
public:
  LoadedELFObjectInfo(RuntimeDyldImpl &RTDyld, ObjSectionToIDMap ObjSecToIDMap)
      : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}

  OwningBinary<ObjectFile>
  getObjectForDebug(const ObjectFile &Obj) const override;
};

template <typename ELFT>
std::unique_ptr<DyldELFObject<ELFT>>
createRTDyldELFObject(MemoryBufferRef Buffer,
                      const ObjectFile &SourceObject,
                      const LoadedELFObjectInfo &L,
                      std::error_code &ec) {
  typedef typename ELFFile<ELFT>::Elf_Shdr Elf_Shdr;
  typedef typename ELFDataTypeTypedefHelper<ELFT>::value_type addr_type;

  std::unique_ptr<DyldELFObject<ELFT>> Obj =
      llvm::make_unique<DyldELFObject<ELFT>>(Buffer, ec);

  // Iterate over all sections in the object.
  auto SI = SourceObject.section_begin();
  for (const auto &Sec : Obj->sections()) {
    StringRef SectionName;
    Sec.getName(SectionName);
    if (SectionName != "") {
      DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
      Elf_Shdr *shdr = const_cast<Elf_Shdr *>(
          reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));

      if (uint64_t SecLoadAddr = L.getSectionLoadAddress(*SI)) {
        // This assumes that the address passed in matches the target address
        // bitness. The template-based type cast handles everything else.
        shdr->sh_addr = static_cast<addr_type>(SecLoadAddr);
      }
    }
    ++SI;
  }

  return Obj;
}

OwningBinary<ObjectFile> createELFDebugObject(const ObjectFile &Obj,
                                              const LoadedELFObjectInfo &L) {
  assert(Obj.isELF() && "Not an ELF object file.");

  std::unique_ptr<MemoryBuffer> Buffer =
      MemoryBuffer::getMemBufferCopy(Obj.getData(), Obj.getFileName());

  std::error_code ec;

  std::unique_ptr<ObjectFile> DebugObj;
  if (Obj.getBytesInAddress() == 4 && Obj.isLittleEndian()) {
    typedef ELFType<support::little, false> ELF32LE;
    DebugObj = createRTDyldELFObject<ELF32LE>(Buffer->getMemBufferRef(), Obj, L,
                                              ec);
  } else if (Obj.getBytesInAddress() == 4 && !Obj.isLittleEndian()) {
    typedef ELFType<support::big, false> ELF32BE;
    DebugObj = createRTDyldELFObject<ELF32BE>(Buffer->getMemBufferRef(), Obj, L,
                                              ec);
  } else if (Obj.getBytesInAddress() == 8 && !Obj.isLittleEndian()) {
    typedef ELFType<support::big, true> ELF64BE;
    DebugObj = createRTDyldELFObject<ELF64BE>(Buffer->getMemBufferRef(), Obj, L,
                                              ec);
  } else if (Obj.getBytesInAddress() == 8 && Obj.isLittleEndian()) {
    typedef ELFType<support::little, true> ELF64LE;
    DebugObj = createRTDyldELFObject<ELF64LE>(Buffer->getMemBufferRef(), Obj, L,
                                              ec);
  } else
    llvm_unreachable("Unexpected ELF format");

  assert(!ec && "Could not construct copy ELF object file");

  return OwningBinary<ObjectFile>(std::move(DebugObj), std::move(Buffer));
}

OwningBinary<ObjectFile>
LoadedELFObjectInfo::getObjectForDebug(const ObjectFile &Obj) const {
  return createELFDebugObject(Obj, *this);
}

} // anonymous namespace

namespace llvm {

RuntimeDyldELF::RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr,
                               RuntimeDyld::SymbolResolver &Resolver)
    : RuntimeDyldImpl(MemMgr, Resolver), GOTSectionID(0), CurrentGOTIndex(0) {}
RuntimeDyldELF::~RuntimeDyldELF() {}

void RuntimeDyldELF::registerEHFrames() {
  for (int i = 0, e = UnregisteredEHFrameSections.size(); i != e; ++i) {
    SID EHFrameSID = UnregisteredEHFrameSections[i];
    uint8_t *EHFrameAddr = Sections[EHFrameSID].Address;
    uint64_t EHFrameLoadAddr = Sections[EHFrameSID].LoadAddress;
    size_t EHFrameSize = Sections[EHFrameSID].Size;
    MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
    RegisteredEHFrameSections.push_back(EHFrameSID);
  }
  UnregisteredEHFrameSections.clear();
}

void RuntimeDyldELF::deregisterEHFrames() {
  for (int i = 0, e = RegisteredEHFrameSections.size(); i != e; ++i) {
    SID EHFrameSID = RegisteredEHFrameSections[i];
    uint8_t *EHFrameAddr = Sections[EHFrameSID].Address;
    uint64_t EHFrameLoadAddr = Sections[EHFrameSID].LoadAddress;
    size_t EHFrameSize = Sections[EHFrameSID].Size;
    MemMgr.deregisterEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
  }
  RegisteredEHFrameSections.clear();
}

std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
RuntimeDyldELF::loadObject(const object::ObjectFile &O) {
  return llvm::make_unique<LoadedELFObjectInfo>(*this, loadObjectImpl(O));
}

void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
                                             uint64_t Offset, uint64_t Value,
                                             uint32_t Type, int64_t Addend,
                                             uint64_t SymOffset) {
  switch (Type) {
  default:
    llvm_unreachable("Relocation type not implemented yet!");
    break;
  case ELF::R_X86_64_64: {
    support::ulittle64_t::ref(Section.Address + Offset) = Value + Addend;
    DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
                 << format("%p\n", Section.Address + Offset));
    break;
  }
  case ELF::R_X86_64_32:
  case ELF::R_X86_64_32S: {
    Value += Addend;
    assert((Type == ELF::R_X86_64_32 && (Value <= UINT32_MAX)) ||
           (Type == ELF::R_X86_64_32S &&
            ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
    uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
    support::ulittle32_t::ref(Section.Address + Offset) = TruncatedAddr;
    DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
                 << format("%p\n", Section.Address + Offset));
    break;
  }
  case ELF::R_X86_64_PC32: {
    uint64_t FinalAddress = Section.LoadAddress + Offset;
    int64_t RealOffset = Value + Addend - FinalAddress;
    assert(isInt<32>(RealOffset));
    int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
    support::ulittle32_t::ref(Section.Address + Offset) = TruncOffset;
    break;
  }
  case ELF::R_X86_64_PC64: {
    uint64_t FinalAddress = Section.LoadAddress + Offset;
    int64_t RealOffset = Value + Addend - FinalAddress;
    support::ulittle64_t::ref(Section.Address + Offset) = RealOffset;
    break;
  }
  }
}

void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
                                          uint64_t Offset, uint32_t Value,
                                          uint32_t Type, int32_t Addend) {
  switch (Type) {
  case ELF::R_386_32: {
    support::ulittle32_t::ref(Section.Address + Offset) = Value + Addend;
    break;
  }
  case ELF::R_386_PC32: {
    uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
    uint32_t RealOffset = Value + Addend - FinalAddress;
    support::ulittle32_t::ref(Section.Address + Offset) = RealOffset;
    break;
  }
  default:
    // There are other relocation types, but it appears these are the
    // only ones currently used by the LLVM ELF object writer
    llvm_unreachable("Relocation type not implemented yet!");
    break;
  }
}

void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
                                              uint64_t Offset, uint64_t Value,
                                              uint32_t Type, int64_t Addend) {
  uint32_t *TargetPtr = reinterpret_cast<uint32_t *>(Section.Address + Offset);
  uint64_t FinalAddress = Section.LoadAddress + Offset;

  DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
               << format("%llx", Section.Address + Offset)
               << " FinalAddress: 0x" << format("%llx", FinalAddress)
               << " Value: 0x" << format("%llx", Value) << " Type: 0x"
               << format("%x", Type) << " Addend: 0x" << format("%llx", Addend)
               << "\n");

  switch (Type) {
  default:
    llvm_unreachable("Relocation type not implemented yet!");
    break;
  case ELF::R_AARCH64_ABS64: {
    uint64_t *TargetPtr =
        reinterpret_cast<uint64_t *>(Section.Address + Offset);
    *TargetPtr = Value + Addend;
    break;
  }
  case ELF::R_AARCH64_PREL32: {
    uint64_t Result = Value + Addend - FinalAddress;
    assert(static_cast<int64_t>(Result) >= INT32_MIN &&
           static_cast<int64_t>(Result) <= UINT32_MAX);
    *TargetPtr = static_cast<uint32_t>(Result & 0xffffffffU);
    break;
  }
  case ELF::R_AARCH64_CALL26: // fallthrough
  case ELF::R_AARCH64_JUMP26: {
    // Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the
    // calculation.
    uint64_t BranchImm = Value + Addend - FinalAddress;

    // "Check that -2^27 <= result < 2^27".
    assert(isInt<28>(BranchImm));

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xfc000000U;
    // Immediate goes in bits 25:0 of B and BL.
    *TargetPtr |= static_cast<uint32_t>(BranchImm & 0xffffffcU) >> 2;
    break;
  }
  case ELF::R_AARCH64_MOVW_UABS_G3: {
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffe0001fU;
    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
    *TargetPtr |= Result >> (48 - 5);
    // Shift must be "lsl #48", in bits 22:21
    assert((*TargetPtr >> 21 & 0x3) == 3 && "invalid shift for relocation");
    break;
  }
  case ELF::R_AARCH64_MOVW_UABS_G2_NC: {
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffe0001fU;
    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
    *TargetPtr |= ((Result & 0xffff00000000ULL) >> (32 - 5));
    // Shift must be "lsl #32", in bits 22:21
    assert((*TargetPtr >> 21 & 0x3) == 2 && "invalid shift for relocation");
    break;
  }
  case ELF::R_AARCH64_MOVW_UABS_G1_NC: {
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffe0001fU;
    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
    *TargetPtr |= ((Result & 0xffff0000U) >> (16 - 5));
    // Shift must be "lsl #16", in bits 22:21
    assert((*TargetPtr >> 21 & 0x3) == 1 && "invalid shift for relocation");
    break;
  }
  case ELF::R_AARCH64_MOVW_UABS_G0_NC: {
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffe0001fU;
    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
    *TargetPtr |= ((Result & 0xffffU) << 5);
    // Shift must be "lsl #0", in bits 22:21.
    assert((*TargetPtr >> 21 & 0x3) == 0 && "invalid shift for relocation");
    break;
  }
  case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
    // Operation: Page(S+A) - Page(P)
    uint64_t Result =
        ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);

    // Check that -2^32 <= X < 2^32
    assert(isInt<33>(Result) && "overflow check failed for relocation");

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0x9f00001fU;
    // Immediate goes in bits 30:29 + 5:23 of ADRP instruction, taken
    // from bits 32:12 of X.
    *TargetPtr |= ((Result & 0x3000U) << (29 - 12));
    *TargetPtr |= ((Result & 0x1ffffc000ULL) >> (14 - 5));
    break;
  }
  case ELF::R_AARCH64_LDST32_ABS_LO12_NC: {
    // Operation: S + A
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffc003ffU;
    // Immediate goes in bits 21:10 of LD/ST instruction, taken
    // from bits 11:2 of X
    *TargetPtr |= ((Result & 0xffc) << (10 - 2));
    break;
  }
  case ELF::R_AARCH64_LDST64_ABS_LO12_NC: {
    // Operation: S + A
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffc003ffU;
    // Immediate goes in bits 21:10 of LD/ST instruction, taken
    // from bits 11:3 of X
    *TargetPtr |= ((Result & 0xff8) << (10 - 3));
    break;
  }
  }
}

void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
                                          uint64_t Offset, uint32_t Value,
                                          uint32_t Type, int32_t Addend) {
  // TODO: Add Thumb relocations.
  uint32_t *TargetPtr = (uint32_t *)(Section.Address + Offset);
  uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
  Value += Addend;

  DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: "
               << Section.Address + Offset
               << " FinalAddress: " << format("%p", FinalAddress) << " Value: "
               << format("%x", Value) << " Type: " << format("%x", Type)
               << " Addend: " << format("%x", Addend) << "\n");

  switch (Type) {
  default:
    llvm_unreachable("Not implemented relocation type!");

  case ELF::R_ARM_NONE:
    break;
  case ELF::R_ARM_PREL31:
  case ELF::R_ARM_TARGET1:
  case ELF::R_ARM_ABS32:
    *TargetPtr = Value;
    break;
  // Write the first 16 bits of the 32-bit value to the mov instruction.
  // The last 4 bits must be shifted into place.
  case ELF::R_ARM_MOVW_ABS_NC:
  case ELF::R_ARM_MOVT_ABS:
    if (Type == ELF::R_ARM_MOVW_ABS_NC)
      Value = Value & 0xFFFF;
    else if (Type == ELF::R_ARM_MOVT_ABS)
      Value = (Value >> 16) & 0xFFFF;
    *TargetPtr &= ~0x000F0FFF;
    *TargetPtr |= Value & 0xFFF;
    *TargetPtr |= ((Value >> 12) & 0xF) << 16;
    break;
  // Write 24 bit relative value to the branch instruction.
  case ELF::R_ARM_PC24: // Fall through.
  case ELF::R_ARM_CALL: // Fall through.
  case ELF::R_ARM_JUMP24:
    int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8);
    RelValue = (RelValue & 0x03FFFFFC) >> 2;
    assert((*TargetPtr & 0xFFFFFF) == 0xFFFFFE);
    *TargetPtr &= 0xFF000000;
    *TargetPtr |= RelValue;
    break;
  }
}

void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section,
                                           uint64_t Offset, uint32_t Value,
                                           uint32_t Type, int32_t Addend) {
  uint8_t *TargetPtr = Section.Address + Offset;
  Value += Addend;

  DEBUG(dbgs() << "resolveMIPSRelocation, LocalAddress: "
               << Section.Address + Offset << " FinalAddress: "
               << format("%p", Section.LoadAddress + Offset) << " Value: "
               << format("%x", Value) << " Type: " << format("%x", Type)
               << " Addend: " << format("%x", Addend) << "\n");

  uint32_t Insn = readBytesUnaligned(TargetPtr, 4);

  switch (Type) {
  default:
    llvm_unreachable("Not implemented relocation type!");
    break;
  case ELF::R_MIPS_32:
    writeBytesUnaligned(Value, TargetPtr, 4);
    break;
  case ELF::R_MIPS_26:
    Insn &= 0xfc000000;
    Insn |= (Value & 0x0fffffff) >> 2;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_HI16:
    // Get the higher 16-bits. Also add 1 if bit 15 is 1.
    Insn &= 0xffff0000;
    Insn |= ((Value + 0x8000) >> 16) & 0xffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_LO16:
    Insn &= 0xffff0000;
    Insn |= Value & 0xffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_PC32: {
    uint32_t FinalAddress = (Section.LoadAddress + Offset);
    writeBytesUnaligned(Value - FinalAddress, (uint8_t *)TargetPtr, 4);
    break;
  }
  case ELF::R_MIPS_PC16: {
    uint32_t FinalAddress = (Section.LoadAddress + Offset);
    Insn &= 0xffff0000;
    Insn |= ((Value - FinalAddress) >> 2) & 0xffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  }
  case ELF::R_MIPS_PC19_S2: {
    uint32_t FinalAddress = (Section.LoadAddress + Offset);
    Insn &= 0xfff80000;
    Insn |= ((Value - (FinalAddress & ~0x3)) >> 2) & 0x7ffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  }
  case ELF::R_MIPS_PC21_S2: {
    uint32_t FinalAddress = (Section.LoadAddress + Offset);
    Insn &= 0xffe00000;
    Insn |= ((Value - FinalAddress) >> 2) & 0x1fffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  }
  case ELF::R_MIPS_PC26_S2: {
    uint32_t FinalAddress = (Section.LoadAddress + Offset);
    Insn &= 0xfc000000;
    Insn |= ((Value - FinalAddress) >> 2) & 0x3ffffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  }
  case ELF::R_MIPS_PCHI16: {
    uint32_t FinalAddress = (Section.LoadAddress + Offset);
    Insn &= 0xffff0000;
    Insn |= ((Value - FinalAddress + 0x8000) >> 16) & 0xffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  }
  case ELF::R_MIPS_PCLO16: {
    uint32_t FinalAddress = (Section.LoadAddress + Offset);
    Insn &= 0xffff0000;
    Insn |= (Value - FinalAddress) & 0xffff;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  }
  }
}

void RuntimeDyldELF::setMipsABI(const ObjectFile &Obj) {
  if (Arch == Triple::UnknownArch ||
      !StringRef(Triple::getArchTypePrefix(Arch)).equals("mips")) {
    IsMipsO32ABI = false;
    IsMipsN64ABI = false;
    return;
  }
  unsigned AbiVariant;
  Obj.getPlatformFlags(AbiVariant);
  IsMipsO32ABI = AbiVariant & ELF::EF_MIPS_ABI_O32;
  IsMipsN64ABI = Obj.getFileFormatName().equals("ELF64-mips");
  if (AbiVariant & ELF::EF_MIPS_ABI2)
    llvm_unreachable("Mips N32 ABI is not supported yet");
}

void RuntimeDyldELF::resolveMIPS64Relocation(const SectionEntry &Section,
                                             uint64_t Offset, uint64_t Value,
                                             uint32_t Type, int64_t Addend,
                                             uint64_t SymOffset,
                                             SID SectionID) {
  uint32_t r_type = Type & 0xff;
  uint32_t r_type2 = (Type >> 8) & 0xff;
  uint32_t r_type3 = (Type >> 16) & 0xff;

  // RelType records which relocation type we are currently applying.
  uint32_t RelType = r_type;
  int64_t CalculatedValue = evaluateMIPS64Relocation(Section, Offset, Value,
                                                     RelType, Addend,
                                                     SymOffset, SectionID);
  if (r_type2 != ELF::R_MIPS_NONE) {
    RelType = r_type2;
    CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
                                               CalculatedValue, SymOffset,
                                               SectionID);
  }
  if (r_type3 != ELF::R_MIPS_NONE) {
    RelType = r_type3;
    CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
                                               CalculatedValue, SymOffset,
                                               SectionID);
  }
  applyMIPS64Relocation(Section.Address + Offset, CalculatedValue, RelType);
}

int64_t
RuntimeDyldELF::evaluateMIPS64Relocation(const SectionEntry &Section,
                                         uint64_t Offset, uint64_t Value,
                                         uint32_t Type, int64_t Addend,
                                         uint64_t SymOffset, SID SectionID) {

  DEBUG(dbgs() << "evaluateMIPS64Relocation, LocalAddress: 0x"
               << format("%llx", Section.Address + Offset)
               << " FinalAddress: 0x"
               << format("%llx", Section.LoadAddress + Offset)
               << " Value: 0x" << format("%llx", Value) << " Type: 0x"
               << format("%x", Type) << " Addend: 0x" << format("%llx", Addend)
               << " SymOffset: " << format("%x", SymOffset)
               << "\n");

  switch (Type) {
  default:
    llvm_unreachable("Not implemented relocation type!");
    break;
  case ELF::R_MIPS_JALR:
  case ELF::R_MIPS_NONE:
    break;
  case ELF::R_MIPS_32:
  case ELF::R_MIPS_64:
    return Value + Addend;
  case ELF::R_MIPS_26:
    return ((Value + Addend) >> 2) & 0x3ffffff;
  case ELF::R_MIPS_GPREL16: {
    uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
    return Value + Addend - (GOTAddr + 0x7ff0);
  }
  case ELF::R_MIPS_SUB:
    return Value - Addend;
  case ELF::R_MIPS_HI16:
    // Get the higher 16-bits. Also add 1 if bit 15 is 1.
    return ((Value + Addend + 0x8000) >> 16) & 0xffff;
  case ELF::R_MIPS_LO16:
    return (Value + Addend) & 0xffff;
  case ELF::R_MIPS_CALL16:
  case ELF::R_MIPS_GOT_DISP:
  case ELF::R_MIPS_GOT_PAGE: {
    uint8_t *LocalGOTAddr =
        getSectionAddress(SectionToGOTMap[SectionID]) + SymOffset;
    uint64_t GOTEntry = readBytesUnaligned(LocalGOTAddr, 8);

    Value += Addend;
    if (Type == ELF::R_MIPS_GOT_PAGE)
      Value = (Value + 0x8000) & ~0xffff;

    if (GOTEntry)
      assert(GOTEntry == Value &&
             "GOT entry has two different addresses.");
    else
      writeBytesUnaligned(Value, LocalGOTAddr, 8);

    return (SymOffset - 0x7ff0) & 0xffff;
  }
  case ELF::R_MIPS_GOT_OFST: {
    int64_t page = (Value + Addend + 0x8000) & ~0xffff;
    return (Value + Addend - page) & 0xffff;
  }
  case ELF::R_MIPS_GPREL32: {
    uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
    return Value + Addend - (GOTAddr + 0x7ff0);
  }
  case ELF::R_MIPS_PC16: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - FinalAddress) >> 2) & 0xffff;
  }
  case ELF::R_MIPS_PC32: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return Value + Addend - FinalAddress;
  }
  case ELF::R_MIPS_PC18_S3: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - (FinalAddress & ~0x7)) >> 3) & 0x3ffff;
  }
  case ELF::R_MIPS_PC19_S2: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - (FinalAddress & ~0x3)) >> 2) & 0x7ffff;
  }
  case ELF::R_MIPS_PC21_S2: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - FinalAddress) >> 2) & 0x1fffff;
  }
  case ELF::R_MIPS_PC26_S2: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - FinalAddress) >> 2) & 0x3ffffff;
  }
  case ELF::R_MIPS_PCHI16: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return ((Value + Addend - FinalAddress + 0x8000) >> 16) & 0xffff;
  }
  case ELF::R_MIPS_PCLO16: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    return (Value + Addend - FinalAddress) & 0xffff;
  }
  }
  return 0;
}

void RuntimeDyldELF::applyMIPS64Relocation(uint8_t *TargetPtr,
                                           int64_t CalculatedValue,
                                           uint32_t Type) {
  uint32_t Insn = readBytesUnaligned(TargetPtr, 4);

  switch (Type) {
  default:
    break;
  case ELF::R_MIPS_32:
  case ELF::R_MIPS_GPREL32:
  case ELF::R_MIPS_PC32:
    writeBytesUnaligned(CalculatedValue & 0xffffffff, TargetPtr, 4);
    break;
  case ELF::R_MIPS_64:
  case ELF::R_MIPS_SUB:
    writeBytesUnaligned(CalculatedValue, TargetPtr, 8);
    break;
  case ELF::R_MIPS_26:
  case ELF::R_MIPS_PC26_S2:
    Insn = (Insn & 0xfc000000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_GPREL16:
    Insn = (Insn & 0xffff0000) | (CalculatedValue & 0xffff);
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_HI16:
  case ELF::R_MIPS_LO16:
  case ELF::R_MIPS_PCHI16:
  case ELF::R_MIPS_PCLO16:
  case ELF::R_MIPS_PC16:
  case ELF::R_MIPS_CALL16:
  case ELF::R_MIPS_GOT_DISP:
  case ELF::R_MIPS_GOT_PAGE:
  case ELF::R_MIPS_GOT_OFST:
    Insn = (Insn & 0xffff0000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_PC18_S3:
    Insn = (Insn & 0xfffc0000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_PC19_S2:
    Insn = (Insn & 0xfff80000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_PC21_S2:
    Insn = (Insn & 0xffe00000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  }
}

// Return the .TOC. section and offset.
void RuntimeDyldELF::findPPC64TOCSection(const ELFObjectFileBase &Obj,
                                         ObjSectionToIDMap &LocalSections,
                                         RelocationValueRef &Rel) {
  // Set a default SectionID in case we do not find a TOC section below.
  // This may happen for references to the TOC base (sym@toc, .opd
  // relocation) without a .toc directive. In this case just use the
  // first section (which is usually the .opd) since the code won't
  // reference the .toc base directly.
  Rel.SymbolName = nullptr;
  Rel.SectionID = 0;

  // The TOC consists of sections .got, .toc, .tocbss, .plt in that
  // order. The TOC starts where the first of these sections starts.
  for (auto &Section: Obj.sections()) {
    StringRef SectionName;
    check(Section.getName(SectionName));

    if (SectionName == ".got"
        || SectionName == ".toc"
        || SectionName == ".tocbss"
        || SectionName == ".plt") {
      Rel.SectionID = findOrEmitSection(Obj, Section, false, LocalSections);
      break;
    }
  }

  // Per the ppc64-elf-linux ABI, the TOC base is the TOC value plus 0x8000,
  // thus permitting a full 64 Kbytes segment.
  Rel.Addend = 0x8000;
}

// Returns the section and offset associated with the OPD entry referenced
// by Symbol.
void RuntimeDyldELF::findOPDEntrySection(const ELFObjectFileBase &Obj,
                                         ObjSectionToIDMap &LocalSections,
                                         RelocationValueRef &Rel) {
  // Get the ELF symbol value (st_value) to compare with Relocation offset in
  // .opd entries
  for (section_iterator si = Obj.section_begin(), se = Obj.section_end();
       si != se; ++si) {
    section_iterator RelSecI = si->getRelocatedSection();
    if (RelSecI == Obj.section_end())
      continue;

    StringRef RelSectionName;
    check(RelSecI->getName(RelSectionName));
    if (RelSectionName != ".opd")
      continue;

    for (elf_relocation_iterator i = si->relocation_begin(),
                                 e = si->relocation_end();
         i != e;) {
      // The R_PPC64_ADDR64 relocation indicates the first field
      // of a .opd entry
      uint64_t TypeFunc = i->getType();
      if (TypeFunc != ELF::R_PPC64_ADDR64) {
        ++i;
        continue;
      }

      uint64_t TargetSymbolOffset = i->getOffset();
      symbol_iterator TargetSymbol = i->getSymbol();
      ErrorOr<int64_t> AddendOrErr = i->getAddend();
      Check(AddendOrErr.getError());
      int64_t Addend = *AddendOrErr;

      ++i;
      if (i == e)
        break;

      // Just check if following relocation is a R_PPC64_TOC
      uint64_t TypeTOC = i->getType();
      if (TypeTOC != ELF::R_PPC64_TOC)
        continue;

      // Finally compare the symbol value and the target symbol offset
      // to check if this .opd entry refers to the symbol the relocation
      // points to.
      if (Rel.Addend != (int64_t)TargetSymbolOffset)
        continue;

      ErrorOr<section_iterator> TSIOrErr = TargetSymbol->getSection();
      check(TSIOrErr.getError());
      section_iterator tsi = *TSIOrErr;
      bool IsCode = tsi->isText();
      Rel.SectionID = findOrEmitSection(Obj, (*tsi), IsCode, LocalSections);
      Rel.Addend = (intptr_t)Addend;
      return;
    }
  }
  llvm_unreachable("Attempting to get address of OPD entry!");
}

// Relocation masks following the #lo(value), #hi(value), #ha(value),
// #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
// document.

static inline uint16_t applyPPClo(uint64_t value) { return value & 0xffff; }

static inline uint16_t applyPPChi(uint64_t value) {
  return (value >> 16) & 0xffff;
}

static inline uint16_t applyPPCha(uint64_t value) {
  return ((value + 0x8000) >> 16) & 0xffff;
}

static inline uint16_t applyPPChigher(uint64_t value) {
  return (value >> 32) & 0xffff;
}

static inline uint16_t applyPPChighera(uint64_t value) {
  return ((value + 0x8000) >> 32) & 0xffff;
}

static inline uint16_t applyPPChighest(uint64_t value) {
  return (value >> 48) & 0xffff;
}

static inline uint16_t applyPPChighesta(uint64_t value) {
  return ((value + 0x8000) >> 48) & 0xffff;
}

void RuntimeDyldELF::resolvePPC32Relocation(const SectionEntry &Section,
                                            uint64_t Offset, uint64_t Value,
                                            uint32_t Type, int64_t Addend) {
  uint8_t *LocalAddress = Section.Address + Offset;
  switch (Type) {
  default:
    llvm_unreachable("Relocation type not implemented yet!");
    break;
  case ELF::R_PPC_ADDR16_LO:
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
    break;
  case ELF::R_PPC_ADDR16_HI:
    writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
    break;
  case ELF::R_PPC_ADDR16_HA:
    writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
    break;
  }
}

void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
                                            uint64_t Offset, uint64_t Value,
                                            uint32_t Type, int64_t Addend) {
  uint8_t *LocalAddress = Section.Address + Offset;
  switch (Type) {
  default:
    llvm_unreachable("Relocation type not implemented yet!");
    break;
  case ELF::R_PPC64_ADDR16:
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_DS:
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
    break;
  case ELF::R_PPC64_ADDR16_LO:
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_LO_DS:
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
    break;
  case ELF::R_PPC64_ADDR16_HI:
    writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HA:
    writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHER:
    writeInt16BE(LocalAddress, applyPPChigher(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHERA:
    writeInt16BE(LocalAddress, applyPPChighera(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHEST:
    writeInt16BE(LocalAddress, applyPPChighest(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHESTA:
    writeInt16BE(LocalAddress, applyPPChighesta(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR14: {
    assert(((Value + Addend) & 3) == 0);
    // Preserve the AA/LK bits in the branch instruction
    uint8_t aalk = *(LocalAddress + 3);
    writeInt16BE(LocalAddress + 2, (aalk & 3) | ((Value + Addend) & 0xfffc));
  } break;
  case ELF::R_PPC64_REL16_LO: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt16BE(LocalAddress, applyPPClo(Delta));
  } break;
  case ELF::R_PPC64_REL16_HI: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt16BE(LocalAddress, applyPPChi(Delta));
  } break;
  case ELF::R_PPC64_REL16_HA: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt16BE(LocalAddress, applyPPCha(Delta));
  } break;
  case ELF::R_PPC64_ADDR32: {
    int32_t Result = static_cast<int32_t>(Value + Addend);
    if (SignExtend32<32>(Result) != Result)
      llvm_unreachable("Relocation R_PPC64_ADDR32 overflow");
    writeInt32BE(LocalAddress, Result);
  } break;
  case ELF::R_PPC64_REL24: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend);
    if (SignExtend32<24>(delta) != delta)
      llvm_unreachable("Relocation R_PPC64_REL24 overflow");
    // Generates a 'bl <address>' instruction
    writeInt32BE(LocalAddress, 0x48000001 | (delta & 0x03FFFFFC));
  } break;
  case ELF::R_PPC64_REL32: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend);
    if (SignExtend32<32>(delta) != delta)
      llvm_unreachable("Relocation R_PPC64_REL32 overflow");
    writeInt32BE(LocalAddress, delta);
  } break;
  case ELF::R_PPC64_REL64: {
    uint64_t FinalAddress = (Section.LoadAddress + Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt64BE(LocalAddress, Delta);
  } break;
  case ELF::R_PPC64_ADDR64:
    writeInt64BE(LocalAddress, Value + Addend);
    break;
  }
}

void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
                                              uint64_t Offset, uint64_t Value,
                                              uint32_t Type, int64_t Addend) {
  uint8_t *LocalAddress = Section.Address + Offset;
  switch (Type) {
  default:
    llvm_unreachable("Relocation type not implemented yet!");
    break;
  case ELF::R_390_PC16DBL:
  case ELF::R_390_PLT16DBL: {
    int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
    assert(int16_t(Delta / 2) * 2 == Delta && "R_390_PC16DBL overflow");
    writeInt16BE(LocalAddress, Delta / 2);
    break;
  }
  case ELF::R_390_PC32DBL:
  case ELF::R_390_PLT32DBL: {
    int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
    assert(int32_t(Delta / 2) * 2 == Delta && "R_390_PC32DBL overflow");
    writeInt32BE(LocalAddress, Delta / 2);
    break;
  }
  case ELF::R_390_PC32: {
    int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
    assert(int32_t(Delta) == Delta && "R_390_PC32 overflow");
    writeInt32BE(LocalAddress, Delta);
    break;
  }
  case ELF::R_390_64:
    writeInt64BE(LocalAddress, Value + Addend);
    break;
  }
}

// The target location for the relocation is described by RE.SectionID and
// RE.Offset.  RE.SectionID can be used to find the SectionEntry.  Each
// SectionEntry has three members describing its location.
// SectionEntry::Address is the address at which the section has been loaded
// into memory in the current (host) process.  SectionEntry::LoadAddress is the
// address that the section will have in the target process.
// SectionEntry::ObjAddress is the address of the bits for this section in the
// original emitted object image (also in the current address space).
//
// Relocations will be applied as if the section were loaded at
// SectionEntry::LoadAddress, but they will be applied at an address based
// on SectionEntry::Address.  SectionEntry::ObjAddress will be used to refer to
// Target memory contents if they are required for value calculations.
//
// The Value parameter here is the load address of the symbol for the
// relocation to be applied.  For relocations which refer to symbols in the
// current object Value will be the LoadAddress of the section in which
// the symbol resides (RE.Addend provides additional information about the
// symbol location).  For external symbols, Value will be the address of the
// symbol in the target address space.
void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE,
                                       uint64_t Value) {
  const SectionEntry &Section = Sections[RE.SectionID];
  return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
                           RE.SymOffset, RE.SectionID);
}

void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
                                       uint64_t Offset, uint64_t Value,
                                       uint32_t Type, int64_t Addend,
                                       uint64_t SymOffset, SID SectionID) {
  switch (Arch) {
  case Triple::x86_64:
    resolveX86_64Relocation(Section, Offset, Value, Type, Addend, SymOffset);
    break;
  case Triple::x86:
    resolveX86Relocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
                         (uint32_t)(Addend & 0xffffffffL));
    break;
  case Triple::aarch64:
  case Triple::aarch64_be:
    resolveAArch64Relocation(Section, Offset, Value, Type, Addend);
    break;
  case Triple::arm: // Fall through.
  case Triple::armeb:
  case Triple::thumb:
  case Triple::thumbeb:
    resolveARMRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
                         (uint32_t)(Addend & 0xffffffffL));
    break;
  case Triple::mips: // Fall through.
  case Triple::mipsel:
  case Triple::mips64:
  case Triple::mips64el:
    if (IsMipsO32ABI)
      resolveMIPSRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL),
                            Type, (uint32_t)(Addend & 0xffffffffL));
    else if (IsMipsN64ABI)
      resolveMIPS64Relocation(Section, Offset, Value, Type, Addend, SymOffset,
                              SectionID);
    else
      llvm_unreachable("Mips ABI not handled");
    break;
  case Triple::ppc:
    resolvePPC32Relocation(Section, Offset, Value, Type, Addend);
    break;
  case Triple::ppc64: // Fall through.
  case Triple::ppc64le:
    resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
    break;
  case Triple::systemz:
    resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
    break;
  default:
    llvm_unreachable("Unsupported CPU type!");
  }
}

void *RuntimeDyldELF::computePlaceholderAddress(unsigned SectionID,
                                                uint64_t Offset) const {
  return (void *)(Sections[SectionID].ObjAddress + Offset);
}

void RuntimeDyldELF::processSimpleRelocation(unsigned SectionID,
                                             uint64_t Offset, unsigned RelType,
                                             RelocationValueRef Value) {
  RelocationEntry RE(SectionID, Offset, RelType, Value.Addend, Value.Offset);
  if (Value.SymbolName)
    addRelocationForSymbol(RE, Value.SymbolName);
  else
    addRelocationForSection(RE, Value.SectionID);
}

uint32_t RuntimeDyldELF::getMatchingLoRelocation(uint32_t RelType,
                                                 bool IsLocal) const {
  switch (RelType) {
  case ELF::R_MICROMIPS_GOT16:
    if (IsLocal)
      return ELF::R_MICROMIPS_LO16;
    break;
  case ELF::R_MICROMIPS_HI16:
    return ELF::R_MICROMIPS_LO16;
  case ELF::R_MIPS_GOT16:
    if (IsLocal)
      return ELF::R_MIPS_LO16;
    break;
  case ELF::R_MIPS_HI16:
    return ELF::R_MIPS_LO16;
  case ELF::R_MIPS_PCHI16:
    return ELF::R_MIPS_PCLO16;
  default:
    break;
  }
  return ELF::R_MIPS_NONE;
}

relocation_iterator RuntimeDyldELF::processRelocationRef(
    unsigned SectionID, relocation_iterator RelI, const ObjectFile &O,
    ObjSectionToIDMap &ObjSectionToID, StubMap &Stubs) {
  const auto &Obj = cast<ELFObjectFileBase>(O);
  uint64_t RelType = RelI->getType();
  ErrorOr<int64_t> AddendOrErr = ELFRelocationRef(*RelI).getAddend();
  int64_t Addend = AddendOrErr ? *AddendOrErr : 0;
  elf_symbol_iterator Symbol = RelI->getSymbol();

  // Obtain the symbol name which is referenced in the relocation
  StringRef TargetName;
  if (Symbol != Obj.symbol_end()) {
    ErrorOr<StringRef> TargetNameOrErr = Symbol->getName();
    if (std::error_code EC = TargetNameOrErr.getError())
      report_fatal_error(EC.message());
    TargetName = *TargetNameOrErr;
  }
  DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
               << " TargetName: " << TargetName << "\n");
  RelocationValueRef Value;
  // First search for the symbol in the local symbol table
  SymbolRef::Type SymType = SymbolRef::ST_Unknown;

  // Search for the symbol in the global symbol table
  RTDyldSymbolTable::const_iterator gsi = GlobalSymbolTable.end();
  if (Symbol != Obj.symbol_end()) {
    gsi = GlobalSymbolTable.find(TargetName.data());
    SymType = Symbol->getType();
  }
  if (gsi != GlobalSymbolTable.end()) {
    const auto &SymInfo = gsi->second;
    Value.SectionID = SymInfo.getSectionID();
    Value.Offset = SymInfo.getOffset();
    Value.Addend = SymInfo.getOffset() + Addend;
  } else {
    switch (SymType) {
    case SymbolRef::ST_Debug: {
      // TODO: Currently ELF SymbolRef::ST_Debug corresponds to STT_SECTION;
      // this is not obvious and could be changed by other developers. The
      // cleanest fix would be to add a new symbol type ST_Section to
      // SymbolRef and use it here.
      section_iterator si = *Symbol->getSection();
      if (si == Obj.section_end())
        llvm_unreachable("Symbol section not found, bad object file format!");
      DEBUG(dbgs() << "\t\tThis is section symbol\n");
      bool isCode = si->isText();
      Value.SectionID = findOrEmitSection(Obj, (*si), isCode, ObjSectionToID);
      Value.Addend = Addend;
      break;
    }
    case SymbolRef::ST_Data:
    case SymbolRef::ST_Unknown: {
      Value.SymbolName = TargetName.data();
      Value.Addend = Addend;

      // Absolute relocations will have a zero symbol ID (STN_UNDEF), which
      // will manifest here as a NULL symbol name.
      // We can set this as a valid (but empty) symbol name, and rely
      // on addRelocationForSymbol to handle this.
      if (!Value.SymbolName)
        Value.SymbolName = "";
      break;
    }
    default:
      llvm_unreachable("Unresolved symbol type!");
      break;
    }
  }

  uint64_t Offset = RelI->getOffset();

  DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset
               << "\n");
  if ((Arch == Triple::aarch64 || Arch == Triple::aarch64_be) &&
      (RelType == ELF::R_AARCH64_CALL26 || RelType == ELF::R_AARCH64_JUMP26)) {
    // This is an AArch64 branch relocation, need to use a stub function.
    DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
    SectionEntry &Section = Sections[SectionID];

    // Look for an existing stub.
    StubMap::const_iterator i = Stubs.find(Value);
    if (i != Stubs.end()) {
      resolveRelocation(Section, Offset, (uint64_t)Section.Address + i->second,
                        RelType, 0);
      DEBUG(dbgs() << " Stub function found\n");
    } else {
      // Create a new stub function.
      DEBUG(dbgs() << " Create a new stub function\n");
      Stubs[Value] = Section.StubOffset;
      uint8_t *StubTargetAddr =
          createStubFunction(Section.Address + Section.StubOffset);

      RelocationEntry REmovz_g3(SectionID, StubTargetAddr - Section.Address,
                                ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend);
      RelocationEntry REmovk_g2(SectionID,
                                StubTargetAddr - Section.Address + 4,
                                ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend);
      RelocationEntry REmovk_g1(SectionID,
                                StubTargetAddr - Section.Address + 8,
                                ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend);
      RelocationEntry REmovk_g0(SectionID,
                                StubTargetAddr - Section.Address + 12,
                                ELF::R_AARCH64_MOVW_UABS_G0_NC, Value.Addend);

      if (Value.SymbolName) {
        addRelocationForSymbol(REmovz_g3, Value.SymbolName);
        addRelocationForSymbol(REmovk_g2, Value.SymbolName);
        addRelocationForSymbol(REmovk_g1, Value.SymbolName);
        addRelocationForSymbol(REmovk_g0, Value.SymbolName);
      } else {
        addRelocationForSection(REmovz_g3, Value.SectionID);
        addRelocationForSection(REmovk_g2, Value.SectionID);
        addRelocationForSection(REmovk_g1, Value.SectionID);
        addRelocationForSection(REmovk_g0, Value.SectionID);
      }
      resolveRelocation(Section, Offset,
                        (uint64_t)Section.Address + Section.StubOffset, RelType,
                        0);
      Section.StubOffset += getMaxStubSize();
    }
  } else if (Arch == Triple::arm) {
    if (RelType == ELF::R_ARM_PC24 || RelType == ELF::R_ARM_CALL ||
        RelType == ELF::R_ARM_JUMP24) {
      // This is an ARM branch relocation, need to use a stub function.
      DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.");
      SectionEntry &Section = Sections[SectionID];

      // Look for an existing stub.
      StubMap::const_iterator i = Stubs.find(Value);
      if (i != Stubs.end()) {
        resolveRelocation(Section, Offset,
                          (uint64_t)Section.Address + i->second, RelType, 0);
        DEBUG(dbgs() << " Stub function found\n");
      } else {
        // Create a new stub function.
        DEBUG(dbgs() << " Create a new stub function\n");
        Stubs[Value] = Section.StubOffset;
        uint8_t *StubTargetAddr =
            createStubFunction(Section.Address + Section.StubOffset);
        RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
                           ELF::R_ARM_ABS32, Value.Addend);
        if (Value.SymbolName)
          addRelocationForSymbol(RE, Value.SymbolName);
        else
          addRelocationForSection(RE, Value.SectionID);

        resolveRelocation(Section, Offset,
                          (uint64_t)Section.Address + Section.StubOffset,
                          RelType, 0);
        Section.StubOffset += getMaxStubSize();
      }
    } else {
      uint32_t *Placeholder = reinterpret_cast<uint32_t *>(
          computePlaceholderAddress(SectionID, Offset));
      if (RelType == ELF::R_ARM_PREL31 || RelType == ELF::R_ARM_TARGET1 ||
          RelType == ELF::R_ARM_ABS32) {
        Value.Addend += *Placeholder;
      } else if (RelType == ELF::R_ARM_MOVW_ABS_NC ||
                 RelType == ELF::R_ARM_MOVT_ABS) {
        // See ELF for ARM documentation
        Value.Addend += (int16_t)((*Placeholder & 0xFFF) |
                                  (((*Placeholder >> 16) & 0xF) << 12));
      }
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    }
  } else if (IsMipsO32ABI) {
    uint8_t *Placeholder = reinterpret_cast<uint8_t *>(
        computePlaceholderAddress(SectionID, Offset));
    uint32_t Opcode = readBytesUnaligned(Placeholder, 4);
    if (RelType == ELF::R_MIPS_26) {
      // This is a MIPS branch relocation, need to use a stub function.
      DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
      SectionEntry &Section = Sections[SectionID];

      // Extract the addend from the instruction.
      // We shift up by two since the Value will be down shifted again
      // when applying the relocation.
      uint32_t Addend = (Opcode & 0x03ffffff) << 2;

      Value.Addend += Addend;

      // Look for an existing stub.
      StubMap::const_iterator i = Stubs.find(Value);
      if (i != Stubs.end()) {
        RelocationEntry RE(SectionID, Offset, RelType, i->second);
        addRelocationForSection(RE, SectionID);
        DEBUG(dbgs() << " Stub function found\n");
      } else {
        // Create a new stub function.
        DEBUG(dbgs() << " Create a new stub function\n");
        Stubs[Value] = Section.StubOffset;
        uint8_t *StubTargetAddr =
            createStubFunction(Section.Address + Section.StubOffset);

        // Creating Hi and Lo relocations for the filled stub instructions.
        RelocationEntry REHi(SectionID, StubTargetAddr - Section.Address,
                             ELF::R_MIPS_HI16, Value.Addend);
        RelocationEntry RELo(SectionID, StubTargetAddr - Section.Address + 4,
                             ELF::R_MIPS_LO16, Value.Addend);

        if (Value.SymbolName) {
          addRelocationForSymbol(REHi, Value.SymbolName);
          addRelocationForSymbol(RELo, Value.SymbolName);
        } else {
          addRelocationForSection(REHi, Value.SectionID);
          addRelocationForSection(RELo, Value.SectionID);
        }

        RelocationEntry RE(SectionID, Offset, RelType, Section.StubOffset);
        addRelocationForSection(RE, SectionID);
        Section.StubOffset += getMaxStubSize();
      }
    } else if (RelType == ELF::R_MIPS_HI16 || RelType == ELF::R_MIPS_PCHI16) {
      int64_t Addend = (Opcode & 0x0000ffff) << 16;
      RelocationEntry RE(SectionID, Offset, RelType, Addend);
      PendingRelocs.push_back(std::make_pair(Value, RE));
    } else if (RelType == ELF::R_MIPS_LO16 || RelType == ELF::R_MIPS_PCLO16) {
      int64_t Addend = Value.Addend + SignExtend32<16>(Opcode & 0x0000ffff);
      for (auto I = PendingRelocs.begin(); I != PendingRelocs.end();) {
        const RelocationValueRef &MatchingValue = I->first;
        RelocationEntry &Reloc = I->second;
        if (MatchingValue == Value &&
            RelType == getMatchingLoRelocation(Reloc.RelType) &&
            SectionID == Reloc.SectionID) {
          Reloc.Addend += Addend;
          if (Value.SymbolName)
            addRelocationForSymbol(Reloc, Value.SymbolName);
          else
            addRelocationForSection(Reloc, Value.SectionID);
          I = PendingRelocs.erase(I);
        } else
          ++I;
      }
      RelocationEntry RE(SectionID, Offset, RelType, Addend);
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    } else {
      if (RelType == ELF::R_MIPS_32)
        Value.Addend += Opcode;
      else if (RelType == ELF::R_MIPS_PC16)
        Value.Addend += SignExtend32<18>((Opcode & 0x0000ffff) << 2);
      else if (RelType == ELF::R_MIPS_PC19_S2)
        Value.Addend += SignExtend32<21>((Opcode & 0x0007ffff) << 2);
      else if (RelType == ELF::R_MIPS_PC21_S2)
        Value.Addend += SignExtend32<23>((Opcode & 0x001fffff) << 2);
      else if (RelType == ELF::R_MIPS_PC26_S2)
        Value.Addend += SignExtend32<28>((Opcode & 0x03ffffff) << 2);
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    }
  } else if (IsMipsN64ABI) {
    uint32_t r_type = RelType & 0xff;
    RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
    if (r_type == ELF::R_MIPS_CALL16 || r_type == ELF::R_MIPS_GOT_PAGE
        || r_type == ELF::R_MIPS_GOT_DISP) {
      StringMap<uint64_t>::iterator i = GOTSymbolOffsets.find(TargetName);
      if (i != GOTSymbolOffsets.end())
        RE.SymOffset = i->second;
      else {
        RE.SymOffset = allocateGOTEntries(SectionID, 1);
        GOTSymbolOffsets[TargetName] = RE.SymOffset;
      }
    }
    if (Value.SymbolName)
      addRelocationForSymbol(RE, Value.SymbolName);
    else
      addRelocationForSection(RE, Value.SectionID);
  } else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
    if (RelType == ELF::R_PPC64_REL24) {
      // Determine ABI variant in use for this object.
      unsigned AbiVariant;
      Obj.getPlatformFlags(AbiVariant);
      AbiVariant &= ELF::EF_PPC64_ABI;
      // A PPC branch relocation will need a stub function if the target is
      // an external symbol (Symbol::ST_Unknown) or if the target address
      // is not within the signed 24-bits branch address.
      SectionEntry &Section = Sections[SectionID];
      uint8_t *Target = Section.Address + Offset;
      bool RangeOverflow = false;
      if (SymType != SymbolRef::ST_Unknown) {
        if (AbiVariant != 2) {
          // In the ELFv1 ABI, a function call may point to the .opd entry,
          // so the final symbol value is calculated based on the relocation
          // values in the .opd section.
          findOPDEntrySection(Obj, ObjSectionToID, Value);
        } else {
          // In the ELFv2 ABI, a function symbol may provide a local entry
          // point, which must be used for direct calls.
          uint8_t SymOther = Symbol->getOther();
          Value.Addend += ELF::decodePPC64LocalEntryOffset(SymOther);
        }
        uint8_t *RelocTarget = Sections[Value.SectionID].Address + Value.Addend;
        int32_t delta = static_cast<int32_t>(Target - RelocTarget);
        // If it is within 24-bits branch range, just set the branch target
        if (SignExtend32<24>(delta) == delta) {
          RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
          if (Value.SymbolName)
            addRelocationForSymbol(RE, Value.SymbolName);
          else
            addRelocationForSection(RE, Value.SectionID);
        } else {
          RangeOverflow = true;
        }
      }
      if (SymType == SymbolRef::ST_Unknown || RangeOverflow) {
        // It is an external symbol (SymbolRef::ST_Unknown) or within a range
        // larger than 24-bits.
        StubMap::const_iterator i = Stubs.find(Value);
        if (i != Stubs.end()) {
          // Symbol function stub already created, just relocate to it
          resolveRelocation(Section, Offset,
                            (uint64_t)Section.Address + i->second, RelType, 0);
          DEBUG(dbgs() << " Stub function found\n");
        } else {
          // Create a new stub function.
          DEBUG(dbgs() << " Create a new stub function\n");
          Stubs[Value] = Section.StubOffset;
          uint8_t *StubTargetAddr =
              createStubFunction(Section.Address + Section.StubOffset,
                                 AbiVariant);
          RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
                             ELF::R_PPC64_ADDR64, Value.Addend);

          // Generates the 64-bits address loads as exemplified in section
          // 4.5.1 in PPC64 ELF ABI.  Note that the relocations need to
          // apply to the low part of the instructions, so we have to update
          // the offset according to the target endianness.
          uint64_t StubRelocOffset = StubTargetAddr - Section.Address;
          if (!IsTargetLittleEndian)
            StubRelocOffset += 2;

          RelocationEntry REhst(SectionID, StubRelocOffset + 0,
                                ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
          RelocationEntry REhr(SectionID, StubRelocOffset + 4,
                               ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
          RelocationEntry REh(SectionID, StubRelocOffset + 12,
                              ELF::R_PPC64_ADDR16_HI, Value.Addend);
          RelocationEntry REl(SectionID, StubRelocOffset + 16,
                              ELF::R_PPC64_ADDR16_LO, Value.Addend);

          if (Value.SymbolName) {
            addRelocationForSymbol(REhst, Value.SymbolName);
            addRelocationForSymbol(REhr, Value.SymbolName);
            addRelocationForSymbol(REh, Value.SymbolName);
            addRelocationForSymbol(REl, Value.SymbolName);
          } else {
            addRelocationForSection(REhst, Value.SectionID);
            addRelocationForSection(REhr, Value.SectionID);
            addRelocationForSection(REh, Value.SectionID);
            addRelocationForSection(REl, Value.SectionID);
          }

          resolveRelocation(Section, Offset,
                            (uint64_t)Section.Address + Section.StubOffset,
                            RelType, 0);
          Section.StubOffset += getMaxStubSize();
        }
        if (SymType == SymbolRef::ST_Unknown) {
          // Restore the TOC for external calls
          if (AbiVariant == 2)
            writeInt32BE(Target + 4, 0xE8410018); // ld r2,24(r1)
          else
            writeInt32BE(Target + 4, 0xE8410028); // ld r2,40(r1)
        }
      }
    } else if (RelType == ELF::R_PPC64_TOC16 ||
               RelType == ELF::R_PPC64_TOC16_DS ||
               RelType == ELF::R_PPC64_TOC16_LO ||
               RelType == ELF::R_PPC64_TOC16_LO_DS ||
               RelType == ELF::R_PPC64_TOC16_HI ||
               RelType == ELF::R_PPC64_TOC16_HA) {
      // These relocations are supposed to subtract the TOC address from
      // the final value.  This does not fit cleanly into the RuntimeDyld
      // scheme, since there may be *two* sections involved in determining
      // the relocation value (the section of the symbol referred to by the
      // relocation, and the TOC section associated with the current module).
      //
      // Fortunately, these relocations are currently only ever generated
      // referring to symbols that themselves reside in the TOC, which means
      // that the two sections are actually the same.  Thus they cancel out
      // and we can immediately resolve the relocation right now.
      switch (RelType) {
      case ELF::R_PPC64_TOC16: RelType = ELF::R_PPC64_ADDR16; break;
      case ELF::R_PPC64_TOC16_DS: RelType = ELF::R_PPC64_ADDR16_DS; break;
      case ELF::R_PPC64_TOC16_LO: RelType = ELF::R_PPC64_ADDR16_LO; break;
      case ELF::R_PPC64_TOC16_LO_DS: RelType = ELF::R_PPC64_ADDR16_LO_DS; break;
      case ELF::R_PPC64_TOC16_HI: RelType = ELF::R_PPC64_ADDR16_HI; break;
      case ELF::R_PPC64_TOC16_HA: RelType = ELF::R_PPC64_ADDR16_HA; break;
      default: llvm_unreachable("Wrong relocation type.");
      }

      RelocationValueRef TOCValue;
      findPPC64TOCSection(Obj, ObjSectionToID, TOCValue);
      if (Value.SymbolName || Value.SectionID != TOCValue.SectionID)
        llvm_unreachable("Unsupported TOC relocation.");
      Value.Addend -= TOCValue.Addend;
      resolveRelocation(Sections[SectionID], Offset, Value.Addend, RelType, 0);
    } else {
      // There are two ways to refer to the TOC address directly: either
      // via a ELF::R_PPC64_TOC relocation (where both symbol and addend are
      // ignored), or via any relocation that refers to the magic ".TOC."
      // symbols (in which case the addend is respected).
      if (RelType == ELF::R_PPC64_TOC) {
        RelType = ELF::R_PPC64_ADDR64;
        findPPC64TOCSection(Obj, ObjSectionToID, Value);
      } else if (TargetName == ".TOC.") {
        findPPC64TOCSection(Obj, ObjSectionToID, Value);
        Value.Addend += Addend;
      }

      RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);

      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    }
  } else if (Arch == Triple::systemz &&
             (RelType == ELF::R_390_PLT32DBL || RelType == ELF::R_390_GOTENT)) {
    // Create function stubs for both PLT and GOT references, regardless of
    // whether the GOT reference is to data or code.  The stub contains the
    // full address of the symbol, as needed by GOT references, and the
    // executable part only adds an overhead of 8 bytes.
    //
    // We could try to conserve space by allocating the code and data
    // parts of the stub separately.  However, as things stand, we allocate
    // a stub for every relocation, so using a GOT in JIT code should be
    // no less space efficient than using an explicit constant pool.
    DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
    SectionEntry &Section = Sections[SectionID];

    // Look for an existing stub.
    StubMap::const_iterator i = Stubs.find(Value);
    uintptr_t StubAddress;
    if (i != Stubs.end()) {
      StubAddress = uintptr_t(Section.Address) + i->second;
      DEBUG(dbgs() << " Stub function found\n");
    } else {
      // Create a new stub function.
      DEBUG(dbgs() << " Create a new stub function\n");

      uintptr_t BaseAddress = uintptr_t(Section.Address);
      uintptr_t StubAlignment = getStubAlignment();
      StubAddress = (BaseAddress + Section.StubOffset + StubAlignment - 1) &
                    -StubAlignment;
      unsigned StubOffset = StubAddress - BaseAddress;

      Stubs[Value] = StubOffset;
      createStubFunction((uint8_t *)StubAddress);
      // The 64-bit symbol address lives 8 bytes into the stub.
      RelocationEntry RE(SectionID, StubOffset + 8, ELF::R_390_64,
                         Value.Offset);
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
      Section.StubOffset = StubOffset + getMaxStubSize();
    }

    if (RelType == ELF::R_390_GOTENT)
      resolveRelocation(Section, Offset, StubAddress + 8, ELF::R_390_PC32DBL,
                        Addend);
    else
      resolveRelocation(Section, Offset, StubAddress, RelType, Addend);
  } else if (Arch == Triple::x86_64) {
    if (RelType == ELF::R_X86_64_PLT32) {
      // The way the PLT relocations normally work is that the linker
      // allocates the PLT and this relocation makes a PC-relative call into
      // the PLT.  The PLT entry will then jump to an address provided by the
      // GOT.  On first call, the GOT address will point back into PLT code
      // that resolves the symbol.  After the first call, the GOT entry points
      // to the actual function.
      //
      // For local functions we're ignoring all of that here and just
      // replacing the PLT32 relocation type with PC32, which will translate
      // the relocation into a PC-relative call directly to the function.  For
      // external symbols we can't be sure the function will be within 2^32
      // bytes of the call site, so we need to create a stub, which calls into
      // the GOT.  This case is equivalent to the usual PLT implementation
      // except that we use the stub mechanism in RuntimeDyld (which puts
      // stubs at the end of the section) rather than allocating a PLT
      // section.
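      //
      // For reference (assuming the generic x86-64 stub emitted by
      // createStubFunction is an indirect "jmp *disp32(%rip)", encoded as
      // FF 25 <disp32>): the 32-bit displacement field sits at
      // StubOffset + 2, and the -4 addend below compensates for %rip
      // pointing 4 bytes past that field when the jump executes.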
      if (Value.SymbolName) {
        // This is a call to an external function.
        // Look for an existing stub.
        SectionEntry &Section = Sections[SectionID];
        StubMap::const_iterator i = Stubs.find(Value);
        uintptr_t StubAddress;
        if (i != Stubs.end()) {
          StubAddress = uintptr_t(Section.Address) + i->second;
          DEBUG(dbgs() << " Stub function found\n");
        } else {
          // Create a new stub function (equivalent to a PLT entry).
          DEBUG(dbgs() << " Create a new stub function\n");

          uintptr_t BaseAddress = uintptr_t(Section.Address);
          uintptr_t StubAlignment = getStubAlignment();
          StubAddress = (BaseAddress + Section.StubOffset + StubAlignment - 1) &
                        -StubAlignment;
          unsigned StubOffset = StubAddress - BaseAddress;
          Stubs[Value] = StubOffset;
          createStubFunction((uint8_t *)StubAddress);

          // Bump our stub offset counter.
          Section.StubOffset = StubOffset + getMaxStubSize();

          // Allocate a GOT entry for the target symbol.
          uint64_t GOTOffset = allocateGOTEntries(SectionID, 1);

          // The load of the GOT address has an addend of -4.
          resolveGOTOffsetRelocation(SectionID, StubOffset + 2, GOTOffset - 4);

          // Fill in the value of the symbol we're targeting into the GOT.
          addRelocationForSymbol(
              computeGOTOffsetRE(SectionID, GOTOffset, 0, ELF::R_X86_64_64),
              Value.SymbolName);
        }

        // Make the target call a call into the stub table.
        resolveRelocation(Section, Offset, StubAddress, ELF::R_X86_64_PC32,
                          Addend);
      } else {
        // Local function: resolve the PLT32 relocation as a direct
        // PC-relative call.
        RelocationEntry RE(SectionID, Offset, ELF::R_X86_64_PC32, Value.Addend,
                           Value.Offset);
        addRelocationForSection(RE, Value.SectionID);
      }
    } else if (RelType == ELF::R_X86_64_GOTPCREL) {
      uint64_t GOTOffset = allocateGOTEntries(SectionID, 1);
      resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend);

      // Fill in the value of the symbol we're targeting into the GOT.
      RelocationEntry RE = computeGOTOffsetRE(SectionID, GOTOffset,
                                              Value.Offset, ELF::R_X86_64_64);
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    } else if (RelType == ELF::R_X86_64_PC32) {
      Value.Addend += support::ulittle32_t::ref(
          computePlaceholderAddress(SectionID, Offset));
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    } else if (RelType == ELF::R_X86_64_PC64) {
      Value.Addend += support::ulittle64_t::ref(
          computePlaceholderAddress(SectionID, Offset));
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    } else {
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    }
  } else {
    if (Arch == Triple::x86) {
      Value.Addend += support::ulittle32_t::ref(
          computePlaceholderAddress(SectionID, Offset));
    }
    processSimpleRelocation(SectionID, Offset, RelType, Value);
  }
  return ++RelI;
}

size_t RuntimeDyldELF::getGOTEntrySize() {
  // We don't use the GOT in all of these cases, but it's essentially free
  // to put them all here.
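  // Entries are pointer-sized for the target.  MIPS is special-cased below
  // because the entry width follows the ABI in use rather than the
  // architecture name (O32 uses 32-bit entries, N64 uses 64-bit entries).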
  size_t Result = 0;
  switch (Arch) {
  case Triple::x86_64:
  case Triple::aarch64:
  case Triple::aarch64_be:
  case Triple::ppc64:
  case Triple::ppc64le:
  case Triple::systemz:
    Result = sizeof(uint64_t);
    break;
  case Triple::x86:
  case Triple::arm:
  case Triple::thumb:
    Result = sizeof(uint32_t);
    break;
  case Triple::mips:
  case Triple::mipsel:
  case Triple::mips64:
  case Triple::mips64el:
    if (IsMipsO32ABI)
      Result = sizeof(uint32_t);
    else if (IsMipsN64ABI)
      Result = sizeof(uint64_t);
    else
      llvm_unreachable("Mips ABI not handled");
    break;
  default:
    llvm_unreachable("Unsupported CPU type!");
  }
  return Result;
}

uint64_t RuntimeDyldELF::allocateGOTEntries(unsigned SectionID, unsigned no) {
  // The GOT section is shared by all sections in the object file, so the
  // incoming SectionID is not needed here.
  (void)SectionID;
  if (GOTSectionID == 0) {
    GOTSectionID = Sections.size();
    // Reserve a section id; we'll allocate the section itself later,
    // once we know the total size.
    Sections.push_back(SectionEntry(".got", nullptr, 0, 0));
  }
  uint64_t StartOffset = CurrentGOTIndex * getGOTEntrySize();
  CurrentGOTIndex += no;
  return StartOffset;
}

void RuntimeDyldELF::resolveGOTOffsetRelocation(unsigned SectionID,
                                                uint64_t Offset,
                                                uint64_t GOTOffset) {
  // Fill in the relative address of the GOT entry into the stub.
  RelocationEntry GOTRE(SectionID, Offset, ELF::R_X86_64_PC32, GOTOffset);
  addRelocationForSection(GOTRE, GOTSectionID);
}

RelocationEntry RuntimeDyldELF::computeGOTOffsetRE(unsigned SectionID,
                                                   uint64_t GOTOffset,
                                                   uint64_t SymbolOffset,
                                                   uint32_t Type) {
  // The GOT section is shared by all sections in the object file, so the
  // incoming SectionID is not needed here.
  (void)SectionID;
  return RelocationEntry(GOTSectionID, GOTOffset, Type, SymbolOffset);
}

void RuntimeDyldELF::finalizeLoad(const ObjectFile &Obj,
                                  ObjSectionToIDMap &SectionMap) {
  if (IsMipsO32ABI)
    if (!PendingRelocs.empty())
      report_fatal_error("Can't find matching LO16 reloc");

  // If necessary, allocate the global offset table.
  if (GOTSectionID != 0) {
    // Allocate memory for the section.
    size_t TotalSize = CurrentGOTIndex * getGOTEntrySize();
    uint8_t *Addr = MemMgr.allocateDataSection(TotalSize, getGOTEntrySize(),
                                               GOTSectionID, ".got", false);
    if (!Addr)
      report_fatal_error("Unable to allocate memory for GOT!");

    Sections[GOTSectionID] = SectionEntry(".got", Addr, TotalSize, 0);

    if (Checker)
      Checker->registerSection(Obj.getFileName(), GOTSectionID);

    // For now, initialize all GOT entries to zero.  We'll fill them in as
    // needed when GOT-based relocations are applied.
    memset(Addr, 0, TotalSize);
    if (IsMipsN64ABI) {
      // To correctly resolve MIPS GOT relocations, we need a mapping from
      // the object's sections to GOTs.
      for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
           SI != SE; ++SI) {
        if (SI->relocation_begin() != SI->relocation_end()) {
          section_iterator RelocatedSection = SI->getRelocatedSection();
          ObjSectionToIDMap::iterator i = SectionMap.find(*RelocatedSection);
          assert(i != SectionMap.end());
          SectionToGOTMap[i->second] = GOTSectionID;
        }
      }
      GOTSymbolOffsets.clear();
    }
  }

  // Look for and record the EH frame section.
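  // Note that only the first section named ".eh_frame" is recorded; its
  // section ID is queued in UnregisteredEHFrameSections for later EH frame
  // registration.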
  ObjSectionToIDMap::iterator i, e;
  for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
    const SectionRef &Section = i->first;
    StringRef Name;
    Section.getName(Name);
    if (Name == ".eh_frame") {
      UnregisteredEHFrameSections.push_back(i->second);
      break;
    }
  }

  GOTSectionID = 0;
  CurrentGOTIndex = 0;
}

bool RuntimeDyldELF::isCompatibleFile(const object::ObjectFile &Obj) const {
  return Obj.isELF();
}

} // namespace llvm