1 //===-- RuntimeDyldELF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // Implementation of ELF support for the MC-JIT runtime dynamic linker. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "RuntimeDyldELF.h" 15 #include "RuntimeDyldCheckerImpl.h" 16 #include "llvm/ADT/IntervalMap.h" 17 #include "llvm/ADT/STLExtras.h" 18 #include "llvm/ADT/StringRef.h" 19 #include "llvm/ADT/Triple.h" 20 #include "llvm/MC/MCStreamer.h" 21 #include "llvm/Object/ELFObjectFile.h" 22 #include "llvm/Object/ObjectFile.h" 23 #include "llvm/Support/ELF.h" 24 #include "llvm/Support/Endian.h" 25 #include "llvm/Support/MemoryBuffer.h" 26 #include "llvm/Support/TargetRegistry.h" 27 28 using namespace llvm; 29 using namespace llvm::object; 30 31 #define DEBUG_TYPE "dyld" 32 33 namespace { 34 35 template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> { 36 LLVM_ELF_IMPORT_TYPES_ELFT(ELFT) 37 38 typedef Elf_Shdr_Impl<ELFT> Elf_Shdr; 39 typedef Elf_Sym_Impl<ELFT> Elf_Sym; 40 typedef Elf_Rel_Impl<ELFT, false> Elf_Rel; 41 typedef Elf_Rel_Impl<ELFT, true> Elf_Rela; 42 43 typedef Elf_Ehdr_Impl<ELFT> Elf_Ehdr; 44 45 typedef typename ELFDataTypeTypedefHelper<ELFT>::value_type addr_type; 46 47 public: 48 DyldELFObject(MemoryBufferRef Wrapper, std::error_code &ec); 49 50 void updateSectionAddress(const SectionRef &Sec, uint64_t Addr); 51 52 void updateSymbolAddress(const SymbolRef &SymRef, uint64_t Addr); 53 54 // Methods for type inquiry through isa, cast and dyn_cast 55 static inline bool classof(const Binary *v) { 56 return (isa<ELFObjectFile<ELFT>>(v) && 57 classof(cast<ELFObjectFile<ELFT>>(v))); 58 } 59 static inline bool classof(const ELFObjectFile<ELFT> *v) { 60 return v->isDyldType(); 61 } 62 }; 63 64 65 66 // The MemoryBuffer passed into this constructor is just a wrapper around the 67 // actual memory. Ultimately, the Binary parent class will take ownership of 68 // this MemoryBuffer object but not the underlying memory. 69 template <class ELFT> 70 DyldELFObject<ELFT>::DyldELFObject(MemoryBufferRef Wrapper, std::error_code &EC) 71 : ELFObjectFile<ELFT>(Wrapper, EC) { 72 this->isDyldELFObject = true; 73 } 74 75 template <class ELFT> 76 void DyldELFObject<ELFT>::updateSectionAddress(const SectionRef &Sec, 77 uint64_t Addr) { 78 DataRefImpl ShdrRef = Sec.getRawDataRefImpl(); 79 Elf_Shdr *shdr = 80 const_cast<Elf_Shdr *>(reinterpret_cast<const Elf_Shdr *>(ShdrRef.p)); 81 82 // This assumes the address passed in matches the target address bitness 83 // The template-based type cast handles everything else. 84 shdr->sh_addr = static_cast<addr_type>(Addr); 85 } 86 87 template <class ELFT> 88 void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef, 89 uint64_t Addr) { 90 91 Elf_Sym *sym = const_cast<Elf_Sym *>( 92 ELFObjectFile<ELFT>::getSymbol(SymRef.getRawDataRefImpl())); 93 94 // This assumes the address passed in matches the target address bitness 95 // The template-based type cast handles everything else. 
96 sym->st_value = static_cast<addr_type>(Addr); 97 } 98 99 class LoadedELFObjectInfo final 100 : public RuntimeDyld::LoadedObjectInfoHelper<LoadedELFObjectInfo> { 101 public: 102 LoadedELFObjectInfo(RuntimeDyldImpl &RTDyld, ObjSectionToIDMap ObjSecToIDMap) 103 : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {} 104 105 OwningBinary<ObjectFile> 106 getObjectForDebug(const ObjectFile &Obj) const override; 107 }; 108 109 template <typename ELFT> 110 std::unique_ptr<DyldELFObject<ELFT>> 111 createRTDyldELFObject(MemoryBufferRef Buffer, 112 const ObjectFile &SourceObject, 113 const LoadedELFObjectInfo &L, 114 std::error_code &ec) { 115 typedef typename ELFFile<ELFT>::Elf_Shdr Elf_Shdr; 116 typedef typename ELFDataTypeTypedefHelper<ELFT>::value_type addr_type; 117 118 std::unique_ptr<DyldELFObject<ELFT>> Obj = 119 llvm::make_unique<DyldELFObject<ELFT>>(Buffer, ec); 120 121 // Iterate over all sections in the object. 122 auto SI = SourceObject.section_begin(); 123 for (const auto &Sec : Obj->sections()) { 124 StringRef SectionName; 125 Sec.getName(SectionName); 126 if (SectionName != "") { 127 DataRefImpl ShdrRef = Sec.getRawDataRefImpl(); 128 Elf_Shdr *shdr = const_cast<Elf_Shdr *>( 129 reinterpret_cast<const Elf_Shdr *>(ShdrRef.p)); 130 131 if (uint64_t SecLoadAddr = L.getSectionLoadAddress(*SI)) { 132 // This assumes that the address passed in matches the target address 133 // bitness. The template-based type cast handles everything else. 134 shdr->sh_addr = static_cast<addr_type>(SecLoadAddr); 135 } 136 } 137 ++SI; 138 } 139 140 return Obj; 141 } 142 143 OwningBinary<ObjectFile> createELFDebugObject(const ObjectFile &Obj, 144 const LoadedELFObjectInfo &L) { 145 assert(Obj.isELF() && "Not an ELF object file."); 146 147 std::unique_ptr<MemoryBuffer> Buffer = 148 MemoryBuffer::getMemBufferCopy(Obj.getData(), Obj.getFileName()); 149 150 std::error_code ec; 151 152 std::unique_ptr<ObjectFile> DebugObj; 153 if (Obj.getBytesInAddress() == 4 && Obj.isLittleEndian()) { 154 typedef ELFType<support::little, false> ELF32LE; 155 DebugObj = createRTDyldELFObject<ELF32LE>(Buffer->getMemBufferRef(), Obj, L, 156 ec); 157 } else if (Obj.getBytesInAddress() == 4 && !Obj.isLittleEndian()) { 158 typedef ELFType<support::big, false> ELF32BE; 159 DebugObj = createRTDyldELFObject<ELF32BE>(Buffer->getMemBufferRef(), Obj, L, 160 ec); 161 } else if (Obj.getBytesInAddress() == 8 && !Obj.isLittleEndian()) { 162 typedef ELFType<support::big, true> ELF64BE; 163 DebugObj = createRTDyldELFObject<ELF64BE>(Buffer->getMemBufferRef(), Obj, L, 164 ec); 165 } else if (Obj.getBytesInAddress() == 8 && Obj.isLittleEndian()) { 166 typedef ELFType<support::little, true> ELF64LE; 167 DebugObj = createRTDyldELFObject<ELF64LE>(Buffer->getMemBufferRef(), Obj, L, 168 ec); 169 } else 170 llvm_unreachable("Unexpected ELF format"); 171 172 assert(!ec && "Could not construct copy ELF object file"); 173 174 return OwningBinary<ObjectFile>(std::move(DebugObj), std::move(Buffer)); 175 } 176 177 OwningBinary<ObjectFile> 178 LoadedELFObjectInfo::getObjectForDebug(const ObjectFile &Obj) const { 179 return createELFDebugObject(Obj, *this); 180 } 181 182 } // anonymous namespace 183 184 namespace llvm { 185 186 RuntimeDyldELF::RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr, 187 JITSymbolResolver &Resolver) 188 : RuntimeDyldImpl(MemMgr, Resolver), GOTSectionID(0), CurrentGOTIndex(0) {} 189 RuntimeDyldELF::~RuntimeDyldELF() {} 190 191 void RuntimeDyldELF::registerEHFrames() { 192 for (int i = 0, e = UnregisteredEHFrameSections.size(); i != 
e; ++i) { 193 SID EHFrameSID = UnregisteredEHFrameSections[i]; 194 uint8_t *EHFrameAddr = Sections[EHFrameSID].getAddress(); 195 uint64_t EHFrameLoadAddr = Sections[EHFrameSID].getLoadAddress(); 196 size_t EHFrameSize = Sections[EHFrameSID].getSize(); 197 MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize); 198 RegisteredEHFrameSections.push_back(EHFrameSID); 199 } 200 UnregisteredEHFrameSections.clear(); 201 } 202 203 void RuntimeDyldELF::deregisterEHFrames() { 204 for (int i = 0, e = RegisteredEHFrameSections.size(); i != e; ++i) { 205 SID EHFrameSID = RegisteredEHFrameSections[i]; 206 uint8_t *EHFrameAddr = Sections[EHFrameSID].getAddress(); 207 uint64_t EHFrameLoadAddr = Sections[EHFrameSID].getLoadAddress(); 208 size_t EHFrameSize = Sections[EHFrameSID].getSize(); 209 MemMgr.deregisterEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize); 210 } 211 RegisteredEHFrameSections.clear(); 212 } 213 214 std::unique_ptr<RuntimeDyld::LoadedObjectInfo> 215 RuntimeDyldELF::loadObject(const object::ObjectFile &O) { 216 if (auto ObjSectionToIDOrErr = loadObjectImpl(O)) 217 return llvm::make_unique<LoadedELFObjectInfo>(*this, *ObjSectionToIDOrErr); 218 else { 219 HasError = true; 220 raw_string_ostream ErrStream(ErrorStr); 221 logAllUnhandledErrors(ObjSectionToIDOrErr.takeError(), ErrStream, ""); 222 return nullptr; 223 } 224 } 225 226 void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section, 227 uint64_t Offset, uint64_t Value, 228 uint32_t Type, int64_t Addend, 229 uint64_t SymOffset) { 230 switch (Type) { 231 default: 232 llvm_unreachable("Relocation type not implemented yet!"); 233 break; 234 case ELF::R_X86_64_64: { 235 support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) = 236 Value + Addend; 237 DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at " 238 << format("%p\n", Section.getAddressWithOffset(Offset))); 239 break; 240 } 241 case ELF::R_X86_64_32: 242 case ELF::R_X86_64_32S: { 243 Value += Addend; 244 assert((Type == ELF::R_X86_64_32 && (Value <= UINT32_MAX)) || 245 (Type == ELF::R_X86_64_32S && 246 ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN))); 247 uint32_t TruncatedAddr = (Value & 0xFFFFFFFF); 248 support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) = 249 TruncatedAddr; 250 DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at " 251 << format("%p\n", Section.getAddressWithOffset(Offset))); 252 break; 253 } 254 case ELF::R_X86_64_PC8: { 255 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 256 int64_t RealOffset = Value + Addend - FinalAddress; 257 assert(isInt<8>(RealOffset)); 258 int8_t TruncOffset = (RealOffset & 0xFF); 259 Section.getAddress()[Offset] = TruncOffset; 260 break; 261 } 262 case ELF::R_X86_64_PC32: { 263 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 264 int64_t RealOffset = Value + Addend - FinalAddress; 265 assert(isInt<32>(RealOffset)); 266 int32_t TruncOffset = (RealOffset & 0xFFFFFFFF); 267 support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) = 268 TruncOffset; 269 break; 270 } 271 case ELF::R_X86_64_PC64: { 272 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 273 int64_t RealOffset = Value + Addend - FinalAddress; 274 support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) = 275 RealOffset; 276 break; 277 } 278 } 279 } 280 281 void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section, 282 uint64_t Offset, uint32_t Value, 283 uint32_t Type, int32_t Addend) { 284 switch (Type) { 285 case 
ELF::R_386_32: { 286 support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) = 287 Value + Addend; 288 break; 289 } 290 case ELF::R_386_PC32: { 291 uint32_t FinalAddress = 292 Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF; 293 uint32_t RealOffset = Value + Addend - FinalAddress; 294 support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) = 295 RealOffset; 296 break; 297 } 298 default: 299 // There are other relocation types, but it appears these are the 300 // only ones currently used by the LLVM ELF object writer 301 llvm_unreachable("Relocation type not implemented yet!"); 302 break; 303 } 304 } 305 306 void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section, 307 uint64_t Offset, uint64_t Value, 308 uint32_t Type, int64_t Addend) { 309 uint32_t *TargetPtr = 310 reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset)); 311 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 312 313 DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x" 314 << format("%llx", Section.getAddressWithOffset(Offset)) 315 << " FinalAddress: 0x" << format("%llx", FinalAddress) 316 << " Value: 0x" << format("%llx", Value) << " Type: 0x" 317 << format("%x", Type) << " Addend: 0x" << format("%llx", Addend) 318 << "\n"); 319 320 switch (Type) { 321 default: 322 llvm_unreachable("Relocation type not implemented yet!"); 323 break; 324 case ELF::R_AARCH64_ABS64: { 325 uint64_t *TargetPtr = 326 reinterpret_cast<uint64_t *>(Section.getAddressWithOffset(Offset)); 327 *TargetPtr = Value + Addend; 328 break; 329 } 330 case ELF::R_AARCH64_PREL32: { 331 uint64_t Result = Value + Addend - FinalAddress; 332 assert(static_cast<int64_t>(Result) >= INT32_MIN && 333 static_cast<int64_t>(Result) <= UINT32_MAX); 334 *TargetPtr = static_cast<uint32_t>(Result & 0xffffffffU); 335 break; 336 } 337 case ELF::R_AARCH64_CALL26: // fallthrough 338 case ELF::R_AARCH64_JUMP26: { 339 // Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the 340 // calculation. 341 uint64_t BranchImm = Value + Addend - FinalAddress; 342 343 // "Check that -2^27 <= result < 2^27". 344 assert(isInt<28>(BranchImm)); 345 346 // AArch64 code is emitted with .rela relocations. The data already in any 347 // bits affected by the relocation on entry is garbage. 348 *TargetPtr &= 0xfc000000U; 349 // Immediate goes in bits 25:0 of B and BL. 350 *TargetPtr |= static_cast<uint32_t>(BranchImm & 0xffffffcU) >> 2; 351 break; 352 } 353 case ELF::R_AARCH64_MOVW_UABS_G3: { 354 uint64_t Result = Value + Addend; 355 356 // AArch64 code is emitted with .rela relocations. The data already in any 357 // bits affected by the relocation on entry is garbage. 358 *TargetPtr &= 0xffe0001fU; 359 // Immediate goes in bits 20:5 of MOVZ/MOVK instruction 360 *TargetPtr |= Result >> (48 - 5); 361 // Shift must be "lsl #48", in bits 22:21 362 assert((*TargetPtr >> 21 & 0x3) == 3 && "invalid shift for relocation"); 363 break; 364 } 365 case ELF::R_AARCH64_MOVW_UABS_G2_NC: { 366 uint64_t Result = Value + Addend; 367 368 // AArch64 code is emitted with .rela relocations. The data already in any 369 // bits affected by the relocation on entry is garbage. 
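    // Note: each of the MOVW_UABS_G* cases here patches the 16-bit imm16
    // field (bits 20:5) of one MOVZ/MOVK instruction with a different 16-bit
    // slice of the absolute address; G2_NC takes bits 47:32 of the result.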
    *TargetPtr &= 0xffe0001fU;
    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
    *TargetPtr |= ((Result & 0xffff00000000ULL) >> (32 - 5));
    // Shift must be "lsl #32", in bits 22:21
    assert((*TargetPtr >> 21 & 0x3) == 2 && "invalid shift for relocation");
    break;
  }
  case ELF::R_AARCH64_MOVW_UABS_G1_NC: {
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffe0001fU;
    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
    *TargetPtr |= ((Result & 0xffff0000U) >> (16 - 5));
    // Shift must be "lsl #16", in bits 22:21
    assert((*TargetPtr >> 21 & 0x3) == 1 && "invalid shift for relocation");
    break;
  }
  case ELF::R_AARCH64_MOVW_UABS_G0_NC: {
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffe0001fU;
    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
    *TargetPtr |= ((Result & 0xffffU) << 5);
    // Shift must be "lsl #0", in bits 22:21.
    assert((*TargetPtr >> 21 & 0x3) == 0 && "invalid shift for relocation");
    break;
  }
  case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
    // Operation: Page(S+A) - Page(P)
    uint64_t Result =
        ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);

    // Check that -2^32 <= X < 2^32
    assert(isInt<33>(Result) && "overflow check failed for relocation");

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0x9f00001fU;
    // Immediate goes in bits 30:29 + 23:5 of ADRP instruction, taken
    // from bits 32:12 of X.
    *TargetPtr |= ((Result & 0x3000U) << (29 - 12));
    *TargetPtr |= ((Result & 0x1ffffc000ULL) >> (14 - 5));
    break;
  }
  case ELF::R_AARCH64_LDST32_ABS_LO12_NC: {
    // Operation: S + A
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffc003ffU;
    // Immediate goes in bits 21:10 of LD/ST instruction, taken
    // from bits 11:2 of X
    *TargetPtr |= ((Result & 0xffc) << (10 - 2));
    break;
  }
  case ELF::R_AARCH64_LDST64_ABS_LO12_NC: {
    // Operation: S + A
    uint64_t Result = Value + Addend;

    // AArch64 code is emitted with .rela relocations. The data already in any
    // bits affected by the relocation on entry is garbage.
    *TargetPtr &= 0xffc003ffU;
    // Immediate goes in bits 21:10 of LD/ST instruction, taken
    // from bits 11:3 of X
    *TargetPtr |= ((Result & 0xff8) << (10 - 3));
    break;
  }
  }
}

void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
                                          uint64_t Offset, uint32_t Value,
                                          uint32_t Type, int32_t Addend) {
  // TODO: Add Thumb relocations.
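  // The relocations below each patch a single 32-bit little-endian word in
  // place (R_ARM_NONE is a no-op); Value and Addend arrive here already
  // truncated to 32 bits by resolveRelocation.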
449 uint32_t *TargetPtr = 450 reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset)); 451 uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF; 452 Value += Addend; 453 454 DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: " 455 << Section.getAddressWithOffset(Offset) 456 << " FinalAddress: " << format("%p", FinalAddress) << " Value: " 457 << format("%x", Value) << " Type: " << format("%x", Type) 458 << " Addend: " << format("%x", Addend) << "\n"); 459 460 switch (Type) { 461 default: 462 llvm_unreachable("Not implemented relocation type!"); 463 464 case ELF::R_ARM_NONE: 465 break; 466 // Write a 31bit signed offset 467 case ELF::R_ARM_PREL31: 468 support::ulittle32_t::ref{TargetPtr} = 469 (support::ulittle32_t::ref{TargetPtr} & 0x80000000) | 470 ((Value - FinalAddress) & ~0x80000000); 471 break; 472 case ELF::R_ARM_TARGET1: 473 case ELF::R_ARM_ABS32: 474 support::ulittle32_t::ref{TargetPtr} = Value; 475 break; 476 // Write first 16 bit of 32 bit value to the mov instruction. 477 // Last 4 bit should be shifted. 478 case ELF::R_ARM_MOVW_ABS_NC: 479 case ELF::R_ARM_MOVT_ABS: 480 if (Type == ELF::R_ARM_MOVW_ABS_NC) 481 Value = Value & 0xFFFF; 482 else if (Type == ELF::R_ARM_MOVT_ABS) 483 Value = (Value >> 16) & 0xFFFF; 484 support::ulittle32_t::ref{TargetPtr} = 485 (support::ulittle32_t::ref{TargetPtr} & ~0x000F0FFF) | (Value & 0xFFF) | 486 (((Value >> 12) & 0xF) << 16); 487 break; 488 // Write 24 bit relative value to the branch instruction. 489 case ELF::R_ARM_PC24: // Fall through. 490 case ELF::R_ARM_CALL: // Fall through. 491 case ELF::R_ARM_JUMP24: 492 int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8); 493 RelValue = (RelValue & 0x03FFFFFC) >> 2; 494 assert((support::ulittle32_t::ref{TargetPtr} & 0xFFFFFF) == 0xFFFFFE); 495 support::ulittle32_t::ref{TargetPtr} = 496 (support::ulittle32_t::ref{TargetPtr} & 0xFF000000) | RelValue; 497 break; 498 } 499 } 500 501 void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section, 502 uint64_t Offset, uint32_t Value, 503 uint32_t Type, int32_t Addend) { 504 uint8_t *TargetPtr = Section.getAddressWithOffset(Offset); 505 Value += Addend; 506 507 DEBUG(dbgs() << "resolveMIPSRelocation, LocalAddress: " 508 << Section.getAddressWithOffset(Offset) << " FinalAddress: " 509 << format("%p", Section.getLoadAddressWithOffset(Offset)) 510 << " Value: " << format("%x", Value) 511 << " Type: " << format("%x", Type) 512 << " Addend: " << format("%x", Addend) << "\n"); 513 514 uint32_t Insn = readBytesUnaligned(TargetPtr, 4); 515 516 switch (Type) { 517 default: 518 llvm_unreachable("Not implemented relocation type!"); 519 break; 520 case ELF::R_MIPS_32: 521 writeBytesUnaligned(Value, TargetPtr, 4); 522 break; 523 case ELF::R_MIPS_26: 524 Insn &= 0xfc000000; 525 Insn |= (Value & 0x0fffffff) >> 2; 526 writeBytesUnaligned(Insn, TargetPtr, 4); 527 break; 528 case ELF::R_MIPS_HI16: 529 // Get the higher 16-bits. Also add 1 if bit 15 is 1. 
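    // The +0x8000 rounds HI16 up when the paired (sign-extended) LO16 half is
    // negative. Example: for Value 0x00018123, HI16 = 0x2 and LO16 = 0x8123
    // (sign-extends to -0x7edd), and (0x2 << 16) - 0x7edd = 0x18123.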
530 Insn &= 0xffff0000; 531 Insn |= ((Value + 0x8000) >> 16) & 0xffff; 532 writeBytesUnaligned(Insn, TargetPtr, 4); 533 break; 534 case ELF::R_MIPS_LO16: 535 Insn &= 0xffff0000; 536 Insn |= Value & 0xffff; 537 writeBytesUnaligned(Insn, TargetPtr, 4); 538 break; 539 case ELF::R_MIPS_PC32: { 540 uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 541 writeBytesUnaligned(Value - FinalAddress, (uint8_t *)TargetPtr, 4); 542 break; 543 } 544 case ELF::R_MIPS_PC16: { 545 uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 546 Insn &= 0xffff0000; 547 Insn |= ((Value - FinalAddress) >> 2) & 0xffff; 548 writeBytesUnaligned(Insn, TargetPtr, 4); 549 break; 550 } 551 case ELF::R_MIPS_PC19_S2: { 552 uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 553 Insn &= 0xfff80000; 554 Insn |= ((Value - (FinalAddress & ~0x3)) >> 2) & 0x7ffff; 555 writeBytesUnaligned(Insn, TargetPtr, 4); 556 break; 557 } 558 case ELF::R_MIPS_PC21_S2: { 559 uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 560 Insn &= 0xffe00000; 561 Insn |= ((Value - FinalAddress) >> 2) & 0x1fffff; 562 writeBytesUnaligned(Insn, TargetPtr, 4); 563 break; 564 } 565 case ELF::R_MIPS_PC26_S2: { 566 uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 567 Insn &= 0xfc000000; 568 Insn |= ((Value - FinalAddress) >> 2) & 0x3ffffff; 569 writeBytesUnaligned(Insn, TargetPtr, 4); 570 break; 571 } 572 case ELF::R_MIPS_PCHI16: { 573 uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 574 Insn &= 0xffff0000; 575 Insn |= ((Value - FinalAddress + 0x8000) >> 16) & 0xffff; 576 writeBytesUnaligned(Insn, TargetPtr, 4); 577 break; 578 } 579 case ELF::R_MIPS_PCLO16: { 580 uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 581 Insn &= 0xffff0000; 582 Insn |= (Value - FinalAddress) & 0xffff; 583 writeBytesUnaligned(Insn, TargetPtr, 4); 584 break; 585 } 586 } 587 } 588 589 void RuntimeDyldELF::setMipsABI(const ObjectFile &Obj) { 590 if (Arch == Triple::UnknownArch || 591 !StringRef(Triple::getArchTypePrefix(Arch)).equals("mips")) { 592 IsMipsO32ABI = false; 593 IsMipsN32ABI = false; 594 IsMipsN64ABI = false; 595 return; 596 } 597 unsigned AbiVariant; 598 Obj.getPlatformFlags(AbiVariant); 599 IsMipsO32ABI = AbiVariant & ELF::EF_MIPS_ABI_O32; 600 IsMipsN32ABI = AbiVariant & ELF::EF_MIPS_ABI2; 601 IsMipsN64ABI = Obj.getFileFormatName().equals("ELF64-mips"); 602 } 603 604 void RuntimeDyldELF::resolveMIPSN32Relocation(const SectionEntry &Section, 605 uint64_t Offset, uint64_t Value, 606 uint32_t Type, int64_t Addend, 607 uint64_t SymOffset, 608 SID SectionID) { 609 int64_t CalculatedValue = evaluateMIPS64Relocation( 610 Section, Offset, Value, Type, Addend, SymOffset, SectionID); 611 applyMIPS64Relocation(Section.getAddressWithOffset(Offset), CalculatedValue, 612 Type); 613 } 614 615 void RuntimeDyldELF::resolveMIPSN64Relocation(const SectionEntry &Section, 616 uint64_t Offset, uint64_t Value, 617 uint32_t Type, int64_t Addend, 618 uint64_t SymOffset, 619 SID SectionID) { 620 uint32_t r_type = Type & 0xff; 621 uint32_t r_type2 = (Type >> 8) & 0xff; 622 uint32_t r_type3 = (Type >> 16) & 0xff; 623 624 // RelType is used to keep information for which relocation type we are 625 // applying relocation. 
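  // An N64 relocation entry packs up to three relocation types (r_type,
  // r_type2, r_type3). They are evaluated in order, with each intermediate
  // result fed in as the addend of the next; only the final result is
  // written back to memory.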
626 uint32_t RelType = r_type; 627 int64_t CalculatedValue = evaluateMIPS64Relocation(Section, Offset, Value, 628 RelType, Addend, 629 SymOffset, SectionID); 630 if (r_type2 != ELF::R_MIPS_NONE) { 631 RelType = r_type2; 632 CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType, 633 CalculatedValue, SymOffset, 634 SectionID); 635 } 636 if (r_type3 != ELF::R_MIPS_NONE) { 637 RelType = r_type3; 638 CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType, 639 CalculatedValue, SymOffset, 640 SectionID); 641 } 642 applyMIPS64Relocation(Section.getAddressWithOffset(Offset), CalculatedValue, 643 RelType); 644 } 645 646 int64_t 647 RuntimeDyldELF::evaluateMIPS64Relocation(const SectionEntry &Section, 648 uint64_t Offset, uint64_t Value, 649 uint32_t Type, int64_t Addend, 650 uint64_t SymOffset, SID SectionID) { 651 652 DEBUG(dbgs() << "evaluateMIPS64Relocation, LocalAddress: 0x" 653 << format("%llx", Section.getAddressWithOffset(Offset)) 654 << " FinalAddress: 0x" 655 << format("%llx", Section.getLoadAddressWithOffset(Offset)) 656 << " Value: 0x" << format("%llx", Value) << " Type: 0x" 657 << format("%x", Type) << " Addend: 0x" << format("%llx", Addend) 658 << " SymOffset: " << format("%x", SymOffset) << "\n"); 659 660 switch (Type) { 661 default: 662 llvm_unreachable("Not implemented relocation type!"); 663 break; 664 case ELF::R_MIPS_JALR: 665 case ELF::R_MIPS_NONE: 666 break; 667 case ELF::R_MIPS_32: 668 case ELF::R_MIPS_64: 669 return Value + Addend; 670 case ELF::R_MIPS_26: 671 return ((Value + Addend) >> 2) & 0x3ffffff; 672 case ELF::R_MIPS_GPREL16: { 673 uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]); 674 return Value + Addend - (GOTAddr + 0x7ff0); 675 } 676 case ELF::R_MIPS_SUB: 677 return Value - Addend; 678 case ELF::R_MIPS_HI16: 679 // Get the higher 16-bits. Also add 1 if bit 15 is 1. 
680 return ((Value + Addend + 0x8000) >> 16) & 0xffff; 681 case ELF::R_MIPS_LO16: 682 return (Value + Addend) & 0xffff; 683 case ELF::R_MIPS_CALL16: 684 case ELF::R_MIPS_GOT_DISP: 685 case ELF::R_MIPS_GOT_PAGE: { 686 uint8_t *LocalGOTAddr = 687 getSectionAddress(SectionToGOTMap[SectionID]) + SymOffset; 688 uint64_t GOTEntry = readBytesUnaligned(LocalGOTAddr, getGOTEntrySize()); 689 690 Value += Addend; 691 if (Type == ELF::R_MIPS_GOT_PAGE) 692 Value = (Value + 0x8000) & ~0xffff; 693 694 if (GOTEntry) 695 assert(GOTEntry == Value && 696 "GOT entry has two different addresses."); 697 else 698 writeBytesUnaligned(Value, LocalGOTAddr, getGOTEntrySize()); 699 700 return (SymOffset - 0x7ff0) & 0xffff; 701 } 702 case ELF::R_MIPS_GOT_OFST: { 703 int64_t page = (Value + Addend + 0x8000) & ~0xffff; 704 return (Value + Addend - page) & 0xffff; 705 } 706 case ELF::R_MIPS_GPREL32: { 707 uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]); 708 return Value + Addend - (GOTAddr + 0x7ff0); 709 } 710 case ELF::R_MIPS_PC16: { 711 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 712 return ((Value + Addend - FinalAddress) >> 2) & 0xffff; 713 } 714 case ELF::R_MIPS_PC32: { 715 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 716 return Value + Addend - FinalAddress; 717 } 718 case ELF::R_MIPS_PC18_S3: { 719 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 720 return ((Value + Addend - (FinalAddress & ~0x7)) >> 3) & 0x3ffff; 721 } 722 case ELF::R_MIPS_PC19_S2: { 723 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 724 return ((Value + Addend - (FinalAddress & ~0x3)) >> 2) & 0x7ffff; 725 } 726 case ELF::R_MIPS_PC21_S2: { 727 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 728 return ((Value + Addend - FinalAddress) >> 2) & 0x1fffff; 729 } 730 case ELF::R_MIPS_PC26_S2: { 731 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 732 return ((Value + Addend - FinalAddress) >> 2) & 0x3ffffff; 733 } 734 case ELF::R_MIPS_PCHI16: { 735 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 736 return ((Value + Addend - FinalAddress + 0x8000) >> 16) & 0xffff; 737 } 738 case ELF::R_MIPS_PCLO16: { 739 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 740 return (Value + Addend - FinalAddress) & 0xffff; 741 } 742 } 743 return 0; 744 } 745 746 void RuntimeDyldELF::applyMIPS64Relocation(uint8_t *TargetPtr, 747 int64_t CalculatedValue, 748 uint32_t Type) { 749 uint32_t Insn = readBytesUnaligned(TargetPtr, 4); 750 751 switch (Type) { 752 default: 753 break; 754 case ELF::R_MIPS_32: 755 case ELF::R_MIPS_GPREL32: 756 case ELF::R_MIPS_PC32: 757 writeBytesUnaligned(CalculatedValue & 0xffffffff, TargetPtr, 4); 758 break; 759 case ELF::R_MIPS_64: 760 case ELF::R_MIPS_SUB: 761 writeBytesUnaligned(CalculatedValue, TargetPtr, 8); 762 break; 763 case ELF::R_MIPS_26: 764 case ELF::R_MIPS_PC26_S2: 765 Insn = (Insn & 0xfc000000) | CalculatedValue; 766 writeBytesUnaligned(Insn, TargetPtr, 4); 767 break; 768 case ELF::R_MIPS_GPREL16: 769 Insn = (Insn & 0xffff0000) | (CalculatedValue & 0xffff); 770 writeBytesUnaligned(Insn, TargetPtr, 4); 771 break; 772 case ELF::R_MIPS_HI16: 773 case ELF::R_MIPS_LO16: 774 case ELF::R_MIPS_PCHI16: 775 case ELF::R_MIPS_PCLO16: 776 case ELF::R_MIPS_PC16: 777 case ELF::R_MIPS_CALL16: 778 case ELF::R_MIPS_GOT_DISP: 779 case ELF::R_MIPS_GOT_PAGE: 780 case ELF::R_MIPS_GOT_OFST: 781 Insn = (Insn & 0xffff0000) | CalculatedValue; 782 writeBytesUnaligned(Insn, TargetPtr, 4); 783 
    break;
  case ELF::R_MIPS_PC18_S3:
    Insn = (Insn & 0xfffc0000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_PC19_S2:
    Insn = (Insn & 0xfff80000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  case ELF::R_MIPS_PC21_S2:
    Insn = (Insn & 0xffe00000) | CalculatedValue;
    writeBytesUnaligned(Insn, TargetPtr, 4);
    break;
  }
}

// Return the .TOC. section and offset.
Error RuntimeDyldELF::findPPC64TOCSection(const ELFObjectFileBase &Obj,
                                          ObjSectionToIDMap &LocalSections,
                                          RelocationValueRef &Rel) {
  // Set a default SectionID in case we do not find a TOC section below.
  // This may happen for references to the TOC base (sym@toc, .opd
  // relocation) without a .toc directive. In this case just use the
  // first section (which is usually the .opd) since the code won't
  // reference the .toc base directly.
  Rel.SymbolName = nullptr;
  Rel.SectionID = 0;

  // The TOC consists of sections .got, .toc, .tocbss, .plt in that
  // order. The TOC starts where the first of these sections starts.
  for (auto &Section: Obj.sections()) {
    StringRef SectionName;
    if (auto EC = Section.getName(SectionName))
      return errorCodeToError(EC);

    if (SectionName == ".got"
        || SectionName == ".toc"
        || SectionName == ".tocbss"
        || SectionName == ".plt") {
      if (auto SectionIDOrErr =
            findOrEmitSection(Obj, Section, false, LocalSections))
        Rel.SectionID = *SectionIDOrErr;
      else
        return SectionIDOrErr.takeError();
      break;
    }
  }

  // Per the ppc64-elf-linux ABI, the TOC base is the TOC value plus 0x8000,
  // thus permitting a full 64 KB segment.
  Rel.Addend = 0x8000;

  return Error::success();
}

// Returns the section and offset associated with the OPD entry referenced
// by Symbol.
Error RuntimeDyldELF::findOPDEntrySection(const ELFObjectFileBase &Obj,
                                          ObjSectionToIDMap &LocalSections,
                                          RelocationValueRef &Rel) {
  // Get the ELF symbol value (st_value) to compare with the relocation offset
  // in .opd entries.
  for (section_iterator si = Obj.section_begin(), se = Obj.section_end();
       si != se; ++si) {
    section_iterator RelSecI = si->getRelocatedSection();
    if (RelSecI == Obj.section_end())
      continue;

    StringRef RelSectionName;
    if (auto EC = RelSecI->getName(RelSectionName))
      return errorCodeToError(EC);

    if (RelSectionName != ".opd")
      continue;

    for (elf_relocation_iterator i = si->relocation_begin(),
                                 e = si->relocation_end();
         i != e;) {
      // The R_PPC64_ADDR64 relocation indicates the first field
      // of a .opd entry.
      uint64_t TypeFunc = i->getType();
      if (TypeFunc != ELF::R_PPC64_ADDR64) {
        ++i;
        continue;
      }

      uint64_t TargetSymbolOffset = i->getOffset();
      symbol_iterator TargetSymbol = i->getSymbol();
      int64_t Addend;
      if (auto AddendOrErr = i->getAddend())
        Addend = *AddendOrErr;
      else
        return errorCodeToError(AddendOrErr.getError());

      ++i;
      if (i == e)
        break;

      // Check that the following relocation is an R_PPC64_TOC.
      uint64_t TypeTOC = i->getType();
      if (TypeTOC != ELF::R_PPC64_TOC)
        continue;

      // Finally, compare the symbol value and the target symbol offset to
      // check whether this .opd entry refers to the symbol the relocation
      // points to.
889 if (Rel.Addend != (int64_t)TargetSymbolOffset) 890 continue; 891 892 section_iterator TSI = Obj.section_end(); 893 if (auto TSIOrErr = TargetSymbol->getSection()) 894 TSI = *TSIOrErr; 895 else 896 return TSIOrErr.takeError(); 897 assert(TSI != Obj.section_end() && "TSI should refer to a valid section"); 898 899 bool IsCode = TSI->isText(); 900 if (auto SectionIDOrErr = findOrEmitSection(Obj, *TSI, IsCode, 901 LocalSections)) 902 Rel.SectionID = *SectionIDOrErr; 903 else 904 return SectionIDOrErr.takeError(); 905 Rel.Addend = (intptr_t)Addend; 906 return Error::success(); 907 } 908 } 909 llvm_unreachable("Attempting to get address of ODP entry!"); 910 } 911 912 // Relocation masks following the #lo(value), #hi(value), #ha(value), 913 // #higher(value), #highera(value), #highest(value), and #highesta(value) 914 // macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi 915 // document. 916 917 static inline uint16_t applyPPClo(uint64_t value) { return value & 0xffff; } 918 919 static inline uint16_t applyPPChi(uint64_t value) { 920 return (value >> 16) & 0xffff; 921 } 922 923 static inline uint16_t applyPPCha (uint64_t value) { 924 return ((value + 0x8000) >> 16) & 0xffff; 925 } 926 927 static inline uint16_t applyPPChigher(uint64_t value) { 928 return (value >> 32) & 0xffff; 929 } 930 931 static inline uint16_t applyPPChighera (uint64_t value) { 932 return ((value + 0x8000) >> 32) & 0xffff; 933 } 934 935 static inline uint16_t applyPPChighest(uint64_t value) { 936 return (value >> 48) & 0xffff; 937 } 938 939 static inline uint16_t applyPPChighesta (uint64_t value) { 940 return ((value + 0x8000) >> 48) & 0xffff; 941 } 942 943 void RuntimeDyldELF::resolvePPC32Relocation(const SectionEntry &Section, 944 uint64_t Offset, uint64_t Value, 945 uint32_t Type, int64_t Addend) { 946 uint8_t *LocalAddress = Section.getAddressWithOffset(Offset); 947 switch (Type) { 948 default: 949 llvm_unreachable("Relocation type not implemented yet!"); 950 break; 951 case ELF::R_PPC_ADDR16_LO: 952 writeInt16BE(LocalAddress, applyPPClo(Value + Addend)); 953 break; 954 case ELF::R_PPC_ADDR16_HI: 955 writeInt16BE(LocalAddress, applyPPChi(Value + Addend)); 956 break; 957 case ELF::R_PPC_ADDR16_HA: 958 writeInt16BE(LocalAddress, applyPPCha(Value + Addend)); 959 break; 960 } 961 } 962 963 void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section, 964 uint64_t Offset, uint64_t Value, 965 uint32_t Type, int64_t Addend) { 966 uint8_t *LocalAddress = Section.getAddressWithOffset(Offset); 967 switch (Type) { 968 default: 969 llvm_unreachable("Relocation type not implemented yet!"); 970 break; 971 case ELF::R_PPC64_ADDR16: 972 writeInt16BE(LocalAddress, applyPPClo(Value + Addend)); 973 break; 974 case ELF::R_PPC64_ADDR16_DS: 975 writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3); 976 break; 977 case ELF::R_PPC64_ADDR16_LO: 978 writeInt16BE(LocalAddress, applyPPClo(Value + Addend)); 979 break; 980 case ELF::R_PPC64_ADDR16_LO_DS: 981 writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3); 982 break; 983 case ELF::R_PPC64_ADDR16_HI: 984 writeInt16BE(LocalAddress, applyPPChi(Value + Addend)); 985 break; 986 case ELF::R_PPC64_ADDR16_HA: 987 writeInt16BE(LocalAddress, applyPPCha(Value + Addend)); 988 break; 989 case ELF::R_PPC64_ADDR16_HIGHER: 990 writeInt16BE(LocalAddress, applyPPChigher(Value + Addend)); 991 break; 992 case ELF::R_PPC64_ADDR16_HIGHERA: 993 writeInt16BE(LocalAddress, applyPPChighera(Value + Addend)); 994 break; 995 case ELF::R_PPC64_ADDR16_HIGHEST: 996 
writeInt16BE(LocalAddress, applyPPChighest(Value + Addend)); 997 break; 998 case ELF::R_PPC64_ADDR16_HIGHESTA: 999 writeInt16BE(LocalAddress, applyPPChighesta(Value + Addend)); 1000 break; 1001 case ELF::R_PPC64_ADDR14: { 1002 assert(((Value + Addend) & 3) == 0); 1003 // Preserve the AA/LK bits in the branch instruction 1004 uint8_t aalk = *(LocalAddress + 3); 1005 writeInt16BE(LocalAddress + 2, (aalk & 3) | ((Value + Addend) & 0xfffc)); 1006 } break; 1007 case ELF::R_PPC64_REL16_LO: { 1008 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 1009 uint64_t Delta = Value - FinalAddress + Addend; 1010 writeInt16BE(LocalAddress, applyPPClo(Delta)); 1011 } break; 1012 case ELF::R_PPC64_REL16_HI: { 1013 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 1014 uint64_t Delta = Value - FinalAddress + Addend; 1015 writeInt16BE(LocalAddress, applyPPChi(Delta)); 1016 } break; 1017 case ELF::R_PPC64_REL16_HA: { 1018 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 1019 uint64_t Delta = Value - FinalAddress + Addend; 1020 writeInt16BE(LocalAddress, applyPPCha(Delta)); 1021 } break; 1022 case ELF::R_PPC64_ADDR32: { 1023 int32_t Result = static_cast<int32_t>(Value + Addend); 1024 if (SignExtend32<32>(Result) != Result) 1025 llvm_unreachable("Relocation R_PPC64_ADDR32 overflow"); 1026 writeInt32BE(LocalAddress, Result); 1027 } break; 1028 case ELF::R_PPC64_REL24: { 1029 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 1030 int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend); 1031 if (SignExtend32<26>(delta) != delta) 1032 llvm_unreachable("Relocation R_PPC64_REL24 overflow"); 1033 // Generates a 'bl <address>' instruction 1034 writeInt32BE(LocalAddress, 0x48000001 | (delta & 0x03FFFFFC)); 1035 } break; 1036 case ELF::R_PPC64_REL32: { 1037 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 1038 int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend); 1039 if (SignExtend32<32>(delta) != delta) 1040 llvm_unreachable("Relocation R_PPC64_REL32 overflow"); 1041 writeInt32BE(LocalAddress, delta); 1042 } break; 1043 case ELF::R_PPC64_REL64: { 1044 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); 1045 uint64_t Delta = Value - FinalAddress + Addend; 1046 writeInt64BE(LocalAddress, Delta); 1047 } break; 1048 case ELF::R_PPC64_ADDR64: 1049 writeInt64BE(LocalAddress, Value + Addend); 1050 break; 1051 } 1052 } 1053 1054 void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section, 1055 uint64_t Offset, uint64_t Value, 1056 uint32_t Type, int64_t Addend) { 1057 uint8_t *LocalAddress = Section.getAddressWithOffset(Offset); 1058 switch (Type) { 1059 default: 1060 llvm_unreachable("Relocation type not implemented yet!"); 1061 break; 1062 case ELF::R_390_PC16DBL: 1063 case ELF::R_390_PLT16DBL: { 1064 int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset); 1065 assert(int16_t(Delta / 2) * 2 == Delta && "R_390_PC16DBL overflow"); 1066 writeInt16BE(LocalAddress, Delta / 2); 1067 break; 1068 } 1069 case ELF::R_390_PC32DBL: 1070 case ELF::R_390_PLT32DBL: { 1071 int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset); 1072 assert(int32_t(Delta / 2) * 2 == Delta && "R_390_PC32DBL overflow"); 1073 writeInt32BE(LocalAddress, Delta / 2); 1074 break; 1075 } 1076 case ELF::R_390_PC32: { 1077 int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset); 1078 assert(int32_t(Delta) == Delta && "R_390_PC32 overflow"); 1079 writeInt32BE(LocalAddress, 
Delta); 1080 break; 1081 } 1082 case ELF::R_390_64: 1083 writeInt64BE(LocalAddress, Value + Addend); 1084 break; 1085 case ELF::R_390_PC64: { 1086 int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset); 1087 writeInt64BE(LocalAddress, Delta); 1088 break; 1089 } 1090 } 1091 } 1092 1093 // The target location for the relocation is described by RE.SectionID and 1094 // RE.Offset. RE.SectionID can be used to find the SectionEntry. Each 1095 // SectionEntry has three members describing its location. 1096 // SectionEntry::Address is the address at which the section has been loaded 1097 // into memory in the current (host) process. SectionEntry::LoadAddress is the 1098 // address that the section will have in the target process. 1099 // SectionEntry::ObjAddress is the address of the bits for this section in the 1100 // original emitted object image (also in the current address space). 1101 // 1102 // Relocations will be applied as if the section were loaded at 1103 // SectionEntry::LoadAddress, but they will be applied at an address based 1104 // on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer to 1105 // Target memory contents if they are required for value calculations. 1106 // 1107 // The Value parameter here is the load address of the symbol for the 1108 // relocation to be applied. For relocations which refer to symbols in the 1109 // current object Value will be the LoadAddress of the section in which 1110 // the symbol resides (RE.Addend provides additional information about the 1111 // symbol location). For external symbols, Value will be the address of the 1112 // symbol in the target address space. 1113 void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE, 1114 uint64_t Value) { 1115 const SectionEntry &Section = Sections[RE.SectionID]; 1116 return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend, 1117 RE.SymOffset, RE.SectionID); 1118 } 1119 1120 void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section, 1121 uint64_t Offset, uint64_t Value, 1122 uint32_t Type, int64_t Addend, 1123 uint64_t SymOffset, SID SectionID) { 1124 switch (Arch) { 1125 case Triple::x86_64: 1126 resolveX86_64Relocation(Section, Offset, Value, Type, Addend, SymOffset); 1127 break; 1128 case Triple::x86: 1129 resolveX86Relocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type, 1130 (uint32_t)(Addend & 0xffffffffL)); 1131 break; 1132 case Triple::aarch64: 1133 case Triple::aarch64_be: 1134 resolveAArch64Relocation(Section, Offset, Value, Type, Addend); 1135 break; 1136 case Triple::arm: // Fall through. 1137 case Triple::armeb: 1138 case Triple::thumb: 1139 case Triple::thumbeb: 1140 resolveARMRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type, 1141 (uint32_t)(Addend & 0xffffffffL)); 1142 break; 1143 case Triple::mips: // Fall through. 1144 case Triple::mipsel: 1145 case Triple::mips64: 1146 case Triple::mips64el: 1147 if (IsMipsO32ABI) 1148 resolveMIPSRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), 1149 Type, (uint32_t)(Addend & 0xffffffffL)); 1150 else if (IsMipsN32ABI) 1151 resolveMIPSN32Relocation(Section, Offset, Value, Type, Addend, SymOffset, 1152 SectionID); 1153 else if (IsMipsN64ABI) 1154 resolveMIPSN64Relocation(Section, Offset, Value, Type, Addend, SymOffset, 1155 SectionID); 1156 else 1157 llvm_unreachable("Mips ABI not handled"); 1158 break; 1159 case Triple::ppc: 1160 resolvePPC32Relocation(Section, Offset, Value, Type, Addend); 1161 break; 1162 case Triple::ppc64: // Fall through. 
  case Triple::ppc64le:
    resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
    break;
  case Triple::systemz:
    resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
    break;
  default:
    llvm_unreachable("Unsupported CPU type!");
  }
}

void *RuntimeDyldELF::computePlaceholderAddress(unsigned SectionID,
                                                uint64_t Offset) const {
  return (void *)(Sections[SectionID].getObjAddress() + Offset);
}

void RuntimeDyldELF::processSimpleRelocation(unsigned SectionID,
                                             uint64_t Offset,
                                             unsigned RelType,
                                             RelocationValueRef Value) {
  RelocationEntry RE(SectionID, Offset, RelType, Value.Addend, Value.Offset);
  if (Value.SymbolName)
    addRelocationForSymbol(RE, Value.SymbolName);
  else
    addRelocationForSection(RE, Value.SectionID);
}

uint32_t RuntimeDyldELF::getMatchingLoRelocation(uint32_t RelType,
                                                 bool IsLocal) const {
  switch (RelType) {
  case ELF::R_MICROMIPS_GOT16:
    if (IsLocal)
      return ELF::R_MICROMIPS_LO16;
    break;
  case ELF::R_MICROMIPS_HI16:
    return ELF::R_MICROMIPS_LO16;
  case ELF::R_MIPS_GOT16:
    if (IsLocal)
      return ELF::R_MIPS_LO16;
    break;
  case ELF::R_MIPS_HI16:
    return ELF::R_MIPS_LO16;
  case ELF::R_MIPS_PCHI16:
    return ELF::R_MIPS_PCLO16;
  default:
    break;
  }
  return ELF::R_MIPS_NONE;
}

Expected<relocation_iterator>
RuntimeDyldELF::processRelocationRef(
    unsigned SectionID, relocation_iterator RelI, const ObjectFile &O,
    ObjSectionToIDMap &ObjSectionToID, StubMap &Stubs) {
  const auto &Obj = cast<ELFObjectFileBase>(O);
  uint64_t RelType = RelI->getType();
  ErrorOr<int64_t> AddendOrErr = ELFRelocationRef(*RelI).getAddend();
  int64_t Addend = AddendOrErr ? *AddendOrErr : 0;
  elf_symbol_iterator Symbol = RelI->getSymbol();

  // Obtain the symbol name which is referenced in the relocation.
  StringRef TargetName;
  if (Symbol != Obj.symbol_end()) {
    if (auto TargetNameOrErr = Symbol->getName())
      TargetName = *TargetNameOrErr;
    else
      return TargetNameOrErr.takeError();
  }
  DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
               << " TargetName: " << TargetName << "\n");
  RelocationValueRef Value;
  // The symbol type defaults to unknown; it is queried below when the
  // relocation references a symbol.
  SymbolRef::Type SymType = SymbolRef::ST_Unknown;

  // Search for the symbol in the global symbol table
  RTDyldSymbolTable::const_iterator gsi = GlobalSymbolTable.end();
  if (Symbol != Obj.symbol_end()) {
    gsi = GlobalSymbolTable.find(TargetName.data());
    Expected<SymbolRef::Type> SymTypeOrErr = Symbol->getType();
    if (!SymTypeOrErr) {
      std::string Buf;
      raw_string_ostream OS(Buf);
      logAllUnhandledErrors(SymTypeOrErr.takeError(), OS, "");
      OS.flush();
      report_fatal_error(Buf);
    }
    SymType = *SymTypeOrErr;
  }
  if (gsi != GlobalSymbolTable.end()) {
    const auto &SymInfo = gsi->second;
    Value.SectionID = SymInfo.getSectionID();
    Value.Offset = SymInfo.getOffset();
    Value.Addend = SymInfo.getOffset() + Addend;
  } else {
    switch (SymType) {
    case SymbolRef::ST_Debug: {
      // TODO: Now ELF SymbolRef::ST_Debug = STT_SECTION; this is not obvious
      // and may be changed by other developers. It might be better to add a
      // new symbol type, ST_Section, to SymbolRef and use it here.
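      // For a section symbol, resolve the target to (SectionID, Addend); the
      // addend is then the offset of the target within that section.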
1258 auto SectionOrErr = Symbol->getSection(); 1259 if (!SectionOrErr) { 1260 std::string Buf; 1261 raw_string_ostream OS(Buf); 1262 logAllUnhandledErrors(SectionOrErr.takeError(), OS, ""); 1263 OS.flush(); 1264 report_fatal_error(Buf); 1265 } 1266 section_iterator si = *SectionOrErr; 1267 if (si == Obj.section_end()) 1268 llvm_unreachable("Symbol section not found, bad object file format!"); 1269 DEBUG(dbgs() << "\t\tThis is section symbol\n"); 1270 bool isCode = si->isText(); 1271 if (auto SectionIDOrErr = findOrEmitSection(Obj, (*si), isCode, 1272 ObjSectionToID)) 1273 Value.SectionID = *SectionIDOrErr; 1274 else 1275 return SectionIDOrErr.takeError(); 1276 Value.Addend = Addend; 1277 break; 1278 } 1279 case SymbolRef::ST_Data: 1280 case SymbolRef::ST_Function: 1281 case SymbolRef::ST_Unknown: { 1282 Value.SymbolName = TargetName.data(); 1283 Value.Addend = Addend; 1284 1285 // Absolute relocations will have a zero symbol ID (STN_UNDEF), which 1286 // will manifest here as a NULL symbol name. 1287 // We can set this as a valid (but empty) symbol name, and rely 1288 // on addRelocationForSymbol to handle this. 1289 if (!Value.SymbolName) 1290 Value.SymbolName = ""; 1291 break; 1292 } 1293 default: 1294 llvm_unreachable("Unresolved symbol type!"); 1295 break; 1296 } 1297 } 1298 1299 uint64_t Offset = RelI->getOffset(); 1300 1301 DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset 1302 << "\n"); 1303 if ((Arch == Triple::aarch64 || Arch == Triple::aarch64_be) && 1304 (RelType == ELF::R_AARCH64_CALL26 || RelType == ELF::R_AARCH64_JUMP26)) { 1305 // This is an AArch64 branch relocation, need to use a stub function. 1306 DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation."); 1307 SectionEntry &Section = Sections[SectionID]; 1308 1309 // Look for an existing stub. 1310 StubMap::const_iterator i = Stubs.find(Value); 1311 if (i != Stubs.end()) { 1312 resolveRelocation(Section, Offset, 1313 (uint64_t)Section.getAddressWithOffset(i->second), 1314 RelType, 0); 1315 DEBUG(dbgs() << " Stub function found\n"); 1316 } else { 1317 // Create a new stub function. 
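      // The AArch64 stub emitted by createStubFunction materializes the full
      // 64-bit target address with a movz/movk sequence (one 16-bit chunk per
      // instruction) and then branches to it through a register; the four
      // MOVW_UABS relocations below patch those chunks.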
1318 DEBUG(dbgs() << " Create a new stub function\n"); 1319 Stubs[Value] = Section.getStubOffset(); 1320 uint8_t *StubTargetAddr = createStubFunction( 1321 Section.getAddressWithOffset(Section.getStubOffset())); 1322 1323 RelocationEntry REmovz_g3(SectionID, 1324 StubTargetAddr - Section.getAddress(), 1325 ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend); 1326 RelocationEntry REmovk_g2(SectionID, StubTargetAddr - 1327 Section.getAddress() + 4, 1328 ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend); 1329 RelocationEntry REmovk_g1(SectionID, StubTargetAddr - 1330 Section.getAddress() + 8, 1331 ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend); 1332 RelocationEntry REmovk_g0(SectionID, StubTargetAddr - 1333 Section.getAddress() + 12, 1334 ELF::R_AARCH64_MOVW_UABS_G0_NC, Value.Addend); 1335 1336 if (Value.SymbolName) { 1337 addRelocationForSymbol(REmovz_g3, Value.SymbolName); 1338 addRelocationForSymbol(REmovk_g2, Value.SymbolName); 1339 addRelocationForSymbol(REmovk_g1, Value.SymbolName); 1340 addRelocationForSymbol(REmovk_g0, Value.SymbolName); 1341 } else { 1342 addRelocationForSection(REmovz_g3, Value.SectionID); 1343 addRelocationForSection(REmovk_g2, Value.SectionID); 1344 addRelocationForSection(REmovk_g1, Value.SectionID); 1345 addRelocationForSection(REmovk_g0, Value.SectionID); 1346 } 1347 resolveRelocation(Section, Offset, 1348 reinterpret_cast<uint64_t>(Section.getAddressWithOffset( 1349 Section.getStubOffset())), 1350 RelType, 0); 1351 Section.advanceStubOffset(getMaxStubSize()); 1352 } 1353 } else if (Arch == Triple::arm) { 1354 if (RelType == ELF::R_ARM_PC24 || RelType == ELF::R_ARM_CALL || 1355 RelType == ELF::R_ARM_JUMP24) { 1356 // This is an ARM branch relocation, need to use a stub function. 1357 DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.\n"); 1358 SectionEntry &Section = Sections[SectionID]; 1359 1360 // Look for an existing stub. 1361 StubMap::const_iterator i = Stubs.find(Value); 1362 if (i != Stubs.end()) { 1363 resolveRelocation( 1364 Section, Offset, 1365 reinterpret_cast<uint64_t>(Section.getAddressWithOffset(i->second)), 1366 RelType, 0); 1367 DEBUG(dbgs() << " Stub function found\n"); 1368 } else { 1369 // Create a new stub function. 
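        // The ARM far stub emitted by createStubFunction loads pc from an
        // adjacent 32-bit literal holding the target address; the
        // R_ARM_ABS32 entry below patches that literal.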
1370 DEBUG(dbgs() << " Create a new stub function\n"); 1371 Stubs[Value] = Section.getStubOffset(); 1372 uint8_t *StubTargetAddr = createStubFunction( 1373 Section.getAddressWithOffset(Section.getStubOffset())); 1374 RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(), 1375 ELF::R_ARM_ABS32, Value.Addend); 1376 if (Value.SymbolName) 1377 addRelocationForSymbol(RE, Value.SymbolName); 1378 else 1379 addRelocationForSection(RE, Value.SectionID); 1380 1381 resolveRelocation(Section, Offset, reinterpret_cast<uint64_t>( 1382 Section.getAddressWithOffset( 1383 Section.getStubOffset())), 1384 RelType, 0); 1385 Section.advanceStubOffset(getMaxStubSize()); 1386 } 1387 } else { 1388 uint32_t *Placeholder = 1389 reinterpret_cast<uint32_t*>(computePlaceholderAddress(SectionID, Offset)); 1390 if (RelType == ELF::R_ARM_PREL31 || RelType == ELF::R_ARM_TARGET1 || 1391 RelType == ELF::R_ARM_ABS32) { 1392 Value.Addend += *Placeholder; 1393 } else if (RelType == ELF::R_ARM_MOVW_ABS_NC || RelType == ELF::R_ARM_MOVT_ABS) { 1394 // See ELF for ARM documentation 1395 Value.Addend += (int16_t)((*Placeholder & 0xFFF) | (((*Placeholder >> 16) & 0xF) << 12)); 1396 } 1397 processSimpleRelocation(SectionID, Offset, RelType, Value); 1398 } 1399 } else if (IsMipsO32ABI) { 1400 uint8_t *Placeholder = reinterpret_cast<uint8_t *>( 1401 computePlaceholderAddress(SectionID, Offset)); 1402 uint32_t Opcode = readBytesUnaligned(Placeholder, 4); 1403 if (RelType == ELF::R_MIPS_26) { 1404 // This is an Mips branch relocation, need to use a stub function. 1405 DEBUG(dbgs() << "\t\tThis is a Mips branch relocation."); 1406 SectionEntry &Section = Sections[SectionID]; 1407 1408 // Extract the addend from the instruction. 1409 // We shift up by two since the Value will be down shifted again 1410 // when applying the relocation. 1411 uint32_t Addend = (Opcode & 0x03ffffff) << 2; 1412 1413 Value.Addend += Addend; 1414 1415 // Look up for existing stub. 1416 StubMap::const_iterator i = Stubs.find(Value); 1417 if (i != Stubs.end()) { 1418 RelocationEntry RE(SectionID, Offset, RelType, i->second); 1419 addRelocationForSection(RE, SectionID); 1420 DEBUG(dbgs() << " Stub function found\n"); 1421 } else { 1422 // Create a new stub function. 1423 DEBUG(dbgs() << " Create a new stub function\n"); 1424 Stubs[Value] = Section.getStubOffset(); 1425 1426 unsigned AbiVariant; 1427 O.getPlatformFlags(AbiVariant); 1428 1429 uint8_t *StubTargetAddr = createStubFunction( 1430 Section.getAddressWithOffset(Section.getStubOffset()), AbiVariant); 1431 1432 // Creating Hi and Lo relocations for the filled stub instructions. 
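        // The O32 stub loads the target address in two halves and then jumps
        // through a register, so its first instruction needs a HI16
        // relocation and its second a LO16 relocation.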
1433 RelocationEntry REHi(SectionID, StubTargetAddr - Section.getAddress(), 1434 ELF::R_MIPS_HI16, Value.Addend); 1435 RelocationEntry RELo(SectionID, 1436 StubTargetAddr - Section.getAddress() + 4, 1437 ELF::R_MIPS_LO16, Value.Addend); 1438 1439 if (Value.SymbolName) { 1440 addRelocationForSymbol(REHi, Value.SymbolName); 1441 addRelocationForSymbol(RELo, Value.SymbolName); 1442 } 1443 else { 1444 addRelocationForSection(REHi, Value.SectionID); 1445 addRelocationForSection(RELo, Value.SectionID); 1446 } 1447 1448 RelocationEntry RE(SectionID, Offset, RelType, Section.getStubOffset()); 1449 addRelocationForSection(RE, SectionID); 1450 Section.advanceStubOffset(getMaxStubSize()); 1451 } 1452 } else if (RelType == ELF::R_MIPS_HI16 || RelType == ELF::R_MIPS_PCHI16) { 1453 int64_t Addend = (Opcode & 0x0000ffff) << 16; 1454 RelocationEntry RE(SectionID, Offset, RelType, Addend); 1455 PendingRelocs.push_back(std::make_pair(Value, RE)); 1456 } else if (RelType == ELF::R_MIPS_LO16 || RelType == ELF::R_MIPS_PCLO16) { 1457 int64_t Addend = Value.Addend + SignExtend32<16>(Opcode & 0x0000ffff); 1458 for (auto I = PendingRelocs.begin(); I != PendingRelocs.end();) { 1459 const RelocationValueRef &MatchingValue = I->first; 1460 RelocationEntry &Reloc = I->second; 1461 if (MatchingValue == Value && 1462 RelType == getMatchingLoRelocation(Reloc.RelType) && 1463 SectionID == Reloc.SectionID) { 1464 Reloc.Addend += Addend; 1465 if (Value.SymbolName) 1466 addRelocationForSymbol(Reloc, Value.SymbolName); 1467 else 1468 addRelocationForSection(Reloc, Value.SectionID); 1469 I = PendingRelocs.erase(I); 1470 } else 1471 ++I; 1472 } 1473 RelocationEntry RE(SectionID, Offset, RelType, Addend); 1474 if (Value.SymbolName) 1475 addRelocationForSymbol(RE, Value.SymbolName); 1476 else 1477 addRelocationForSection(RE, Value.SectionID); 1478 } else { 1479 if (RelType == ELF::R_MIPS_32) 1480 Value.Addend += Opcode; 1481 else if (RelType == ELF::R_MIPS_PC16) 1482 Value.Addend += SignExtend32<18>((Opcode & 0x0000ffff) << 2); 1483 else if (RelType == ELF::R_MIPS_PC19_S2) 1484 Value.Addend += SignExtend32<21>((Opcode & 0x0007ffff) << 2); 1485 else if (RelType == ELF::R_MIPS_PC21_S2) 1486 Value.Addend += SignExtend32<23>((Opcode & 0x001fffff) << 2); 1487 else if (RelType == ELF::R_MIPS_PC26_S2) 1488 Value.Addend += SignExtend32<28>((Opcode & 0x03ffffff) << 2); 1489 processSimpleRelocation(SectionID, Offset, RelType, Value); 1490 } 1491 } else if (IsMipsN32ABI || IsMipsN64ABI) { 1492 uint32_t r_type = RelType & 0xff; 1493 RelocationEntry RE(SectionID, Offset, RelType, Value.Addend); 1494 if (r_type == ELF::R_MIPS_CALL16 || r_type == ELF::R_MIPS_GOT_PAGE 1495 || r_type == ELF::R_MIPS_GOT_DISP) { 1496 StringMap<uint64_t>::iterator i = GOTSymbolOffsets.find(TargetName); 1497 if (i != GOTSymbolOffsets.end()) 1498 RE.SymOffset = i->second; 1499 else { 1500 RE.SymOffset = allocateGOTEntries(SectionID, 1); 1501 GOTSymbolOffsets[TargetName] = RE.SymOffset; 1502 } 1503 } 1504 if (Value.SymbolName) 1505 addRelocationForSymbol(RE, Value.SymbolName); 1506 else 1507 addRelocationForSection(RE, Value.SectionID); 1508 } else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) { 1509 if (RelType == ELF::R_PPC64_REL24) { 1510 // Determine ABI variant in use for this object. 
1511 unsigned AbiVariant; 1512 Obj.getPlatformFlags(AbiVariant); 1513 AbiVariant &= ELF::EF_PPC64_ABI; 1514 // A PPC branch relocation will need a stub function if the target is 1515 // an external symbol (Symbol::ST_Unknown) or if the target address 1516 // is not within the signed 24-bits branch address. 1517 SectionEntry &Section = Sections[SectionID]; 1518 uint8_t *Target = Section.getAddressWithOffset(Offset); 1519 bool RangeOverflow = false; 1520 if (SymType != SymbolRef::ST_Unknown) { 1521 if (AbiVariant != 2) { 1522 // In the ELFv1 ABI, a function call may point to the .opd entry, 1523 // so the final symbol value is calculated based on the relocation 1524 // values in the .opd section. 1525 if (auto Err = findOPDEntrySection(Obj, ObjSectionToID, Value)) 1526 return std::move(Err); 1527 } else { 1528 // In the ELFv2 ABI, a function symbol may provide a local entry 1529 // point, which must be used for direct calls. 1530 uint8_t SymOther = Symbol->getOther(); 1531 Value.Addend += ELF::decodePPC64LocalEntryOffset(SymOther); 1532 } 1533 uint8_t *RelocTarget = 1534 Sections[Value.SectionID].getAddressWithOffset(Value.Addend); 1535 int32_t delta = static_cast<int32_t>(Target - RelocTarget); 1536 // If it is within 26-bits branch range, just set the branch target 1537 if (SignExtend32<26>(delta) == delta) { 1538 RelocationEntry RE(SectionID, Offset, RelType, Value.Addend); 1539 if (Value.SymbolName) 1540 addRelocationForSymbol(RE, Value.SymbolName); 1541 else 1542 addRelocationForSection(RE, Value.SectionID); 1543 } else { 1544 RangeOverflow = true; 1545 } 1546 } 1547 if (SymType == SymbolRef::ST_Unknown || RangeOverflow) { 1548 // It is an external symbol (SymbolRef::ST_Unknown) or within a range 1549 // larger than 24-bits. 1550 StubMap::const_iterator i = Stubs.find(Value); 1551 if (i != Stubs.end()) { 1552 // Symbol function stub already created, just relocate to it 1553 resolveRelocation(Section, Offset, 1554 reinterpret_cast<uint64_t>( 1555 Section.getAddressWithOffset(i->second)), 1556 RelType, 0); 1557 DEBUG(dbgs() << " Stub function found\n"); 1558 } else { 1559 // Create a new stub function. 1560 DEBUG(dbgs() << " Create a new stub function\n"); 1561 Stubs[Value] = Section.getStubOffset(); 1562 uint8_t *StubTargetAddr = createStubFunction( 1563 Section.getAddressWithOffset(Section.getStubOffset()), 1564 AbiVariant); 1565 RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(), 1566 ELF::R_PPC64_ADDR64, Value.Addend); 1567 1568 // Generates the 64-bits address loads as exemplified in section 1569 // 4.5.1 in PPC64 ELF ABI. Note that the relocations need to 1570 // apply to the low part of the instructions, so we have to update 1571 // the offset according to the target endianness. 
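          // On big-endian targets the 16-bit immediate lives in the second
          // halfword of each 4-byte instruction, hence the extra 2-byte
          // offset applied below.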
          uint64_t StubRelocOffset = StubTargetAddr - Section.getAddress();
          if (!IsTargetLittleEndian)
            StubRelocOffset += 2;

          RelocationEntry REhst(SectionID, StubRelocOffset + 0,
                                ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
          RelocationEntry REhr(SectionID, StubRelocOffset + 4,
                               ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
          RelocationEntry REh(SectionID, StubRelocOffset + 12,
                              ELF::R_PPC64_ADDR16_HI, Value.Addend);
          RelocationEntry REl(SectionID, StubRelocOffset + 16,
                              ELF::R_PPC64_ADDR16_LO, Value.Addend);

          if (Value.SymbolName) {
            addRelocationForSymbol(REhst, Value.SymbolName);
            addRelocationForSymbol(REhr, Value.SymbolName);
            addRelocationForSymbol(REh, Value.SymbolName);
            addRelocationForSymbol(REl, Value.SymbolName);
          } else {
            addRelocationForSection(REhst, Value.SectionID);
            addRelocationForSection(REhr, Value.SectionID);
            addRelocationForSection(REh, Value.SectionID);
            addRelocationForSection(REl, Value.SectionID);
          }

          resolveRelocation(Section, Offset, reinterpret_cast<uint64_t>(
                                                 Section.getAddressWithOffset(
                                                     Section.getStubOffset())),
                            RelType, 0);
          Section.advanceStubOffset(getMaxStubSize());
        }
        if (SymType == SymbolRef::ST_Unknown) {
          // Restore the TOC for external calls.
          if (AbiVariant == 2)
            writeInt32BE(Target + 4, 0xE8410018); // ld r2,24(r1)
          else
            writeInt32BE(Target + 4, 0xE8410028); // ld r2,40(r1)
        }
      }
    } else if (RelType == ELF::R_PPC64_TOC16 ||
               RelType == ELF::R_PPC64_TOC16_DS ||
               RelType == ELF::R_PPC64_TOC16_LO ||
               RelType == ELF::R_PPC64_TOC16_LO_DS ||
               RelType == ELF::R_PPC64_TOC16_HI ||
               RelType == ELF::R_PPC64_TOC16_HA) {
      // These relocations are supposed to subtract the TOC address from
      // the final value. This does not fit cleanly into the RuntimeDyld
      // scheme, since there may be *two* sections involved in determining
      // the relocation value (the section of the symbol referred to by the
      // relocation, and the TOC section associated with the current module).
      //
      // Fortunately, these relocations are currently only ever generated
      // referring to symbols that themselves reside in the TOC, which means
      // that the two sections are actually the same. Thus they cancel out
      // and we can immediately resolve the relocation right now.
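      // Rewrite each TOC-relative type to its absolute ADDR16 counterpart;
      // the TOC base is subtracted from the addend just below, so the net
      // value is unchanged.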
      switch (RelType) {
      case ELF::R_PPC64_TOC16: RelType = ELF::R_PPC64_ADDR16; break;
      case ELF::R_PPC64_TOC16_DS: RelType = ELF::R_PPC64_ADDR16_DS; break;
      case ELF::R_PPC64_TOC16_LO: RelType = ELF::R_PPC64_ADDR16_LO; break;
      case ELF::R_PPC64_TOC16_LO_DS: RelType = ELF::R_PPC64_ADDR16_LO_DS; break;
      case ELF::R_PPC64_TOC16_HI: RelType = ELF::R_PPC64_ADDR16_HI; break;
      case ELF::R_PPC64_TOC16_HA: RelType = ELF::R_PPC64_ADDR16_HA; break;
      default: llvm_unreachable("Wrong relocation type.");
      }

      RelocationValueRef TOCValue;
      if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, TOCValue))
        return std::move(Err);
      if (Value.SymbolName || Value.SectionID != TOCValue.SectionID)
        llvm_unreachable("Unsupported TOC relocation.");
      Value.Addend -= TOCValue.Addend;
      resolveRelocation(Sections[SectionID], Offset, Value.Addend, RelType, 0);
    } else {
      // There are two ways to refer to the TOC address directly: either
      // via an ELF::R_PPC64_TOC relocation (where both symbol and addend are
      // ignored), or via any relocation that refers to the magic ".TOC."
      // symbol (in which case the addend is respected).
      if (RelType == ELF::R_PPC64_TOC) {
        RelType = ELF::R_PPC64_ADDR64;
        if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, Value))
          return std::move(Err);
      } else if (TargetName == ".TOC.") {
        if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, Value))
          return std::move(Err);
        Value.Addend += Addend;
      }

      RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);

      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    }
  } else if (Arch == Triple::systemz &&
             (RelType == ELF::R_390_PLT32DBL || RelType == ELF::R_390_GOTENT)) {
    // Create function stubs for both PLT and GOT references, regardless of
    // whether the GOT reference is to data or code. The stub contains the
    // full address of the symbol, as needed by GOT references, and the
    // executable part only adds an overhead of 8 bytes.
    //
    // We could try to conserve space by allocating the code and data
    // parts of the stub separately. However, as things stand, we allocate
    // a stub for every relocation, so using a GOT in JIT code should be
    // no less space efficient than using an explicit constant pool.
    DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
    SectionEntry &Section = Sections[SectionID];

    // Look for an existing stub.
    StubMap::const_iterator i = Stubs.find(Value);
    uintptr_t StubAddress;
    if (i != Stubs.end()) {
      StubAddress = uintptr_t(Section.getAddressWithOffset(i->second));
      DEBUG(dbgs() << " Stub function found\n");
    } else {
      // Create a new stub function.
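      // The computation below rounds the next free stub offset up to the
      // target's stub alignment before carving out space for the new stub.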
      DEBUG(dbgs() << " Create a new stub function\n");

      uintptr_t BaseAddress = uintptr_t(Section.getAddress());
      uintptr_t StubAlignment = getStubAlignment();
      StubAddress =
          (BaseAddress + Section.getStubOffset() + StubAlignment - 1) &
          -StubAlignment;
      unsigned StubOffset = StubAddress - BaseAddress;

      Stubs[Value] = StubOffset;
      createStubFunction((uint8_t *)StubAddress);
      RelocationEntry RE(SectionID, StubOffset + 8, ELF::R_390_64,
                         Value.Offset);
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
      Section.advanceStubOffset(getMaxStubSize());
    }

    if (RelType == ELF::R_390_GOTENT)
      resolveRelocation(Section, Offset, StubAddress + 8, ELF::R_390_PC32DBL,
                        Addend);
    else
      resolveRelocation(Section, Offset, StubAddress, RelType, Addend);
  } else if (Arch == Triple::x86_64) {
    if (RelType == ELF::R_X86_64_PLT32) {
      // The way the PLT relocations normally work is that the linker
      // allocates the PLT and this relocation makes a PC-relative call into
      // the PLT. The PLT entry will then jump to an address provided by the
      // GOT. On first call, the GOT address will point back into PLT code
      // that resolves the symbol. After the first call, the GOT entry points
      // to the actual function.
      //
      // For local functions we're ignoring all of that here and just
      // replacing the PLT32 relocation type with PC32, which will translate
      // the relocation into a PC-relative call directly to the function. For
      // external symbols we can't be sure the function will be within 2^32
      // bytes of the call site, so we need to create a stub, which calls into
      // the GOT. This case is equivalent to the usual PLT implementation
      // except that we use the stub mechanism in RuntimeDyld (which puts
      // stubs at the end of the section) rather than allocating a PLT
      // section.
      if (Value.SymbolName) {
        // This is a call to an external function.
        // Look for an existing stub.
        SectionEntry &Section = Sections[SectionID];
        StubMap::const_iterator i = Stubs.find(Value);
        uintptr_t StubAddress;
        if (i != Stubs.end()) {
          StubAddress = uintptr_t(Section.getAddress()) + i->second;
          DEBUG(dbgs() << " Stub function found\n");
        } else {
          // Create a new stub function (equivalent to a PLT entry).
          DEBUG(dbgs() << " Create a new stub function\n");

          uintptr_t BaseAddress = uintptr_t(Section.getAddress());
          uintptr_t StubAlignment = getStubAlignment();
          StubAddress =
              (BaseAddress + Section.getStubOffset() + StubAlignment - 1) &
              -StubAlignment;
          unsigned StubOffset = StubAddress - BaseAddress;
          Stubs[Value] = StubOffset;
          createStubFunction((uint8_t *)StubAddress);

          // Bump our stub offset counter
          Section.advanceStubOffset(getMaxStubSize());

          // Allocate a GOT Entry
          uint64_t GOTOffset = allocateGOTEntries(SectionID, 1);

          // The load of the GOT address has an addend of -4
          resolveGOTOffsetRelocation(SectionID, StubOffset + 2, GOTOffset - 4);

          // Fill in the value of the symbol we're targeting into the GOT
          addRelocationForSymbol(
              computeGOTOffsetRE(SectionID, GOTOffset, 0, ELF::R_X86_64_64),
              Value.SymbolName);
        }

        // Make the target call a call into the stub table.
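        // The stub jumps indirectly through the GOT entry allocated above,
        // so a plain PC32 branch to the stub behaves like a PLT call.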
        resolveRelocation(Section, Offset, StubAddress, ELF::R_X86_64_PC32,
                          Addend);
      } else {
        RelocationEntry RE(SectionID, Offset, ELF::R_X86_64_PC32, Value.Addend,
                           Value.Offset);
        addRelocationForSection(RE, Value.SectionID);
      }
    } else if (RelType == ELF::R_X86_64_GOTPCREL ||
               RelType == ELF::R_X86_64_GOTPCRELX ||
               RelType == ELF::R_X86_64_REX_GOTPCRELX) {
      uint64_t GOTOffset = allocateGOTEntries(SectionID, 1);
      resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend);

      // Fill in the value of the symbol we're targeting into the GOT
      RelocationEntry RE =
          computeGOTOffsetRE(SectionID, GOTOffset, Value.Offset,
                             ELF::R_X86_64_64);
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    } else if (RelType == ELF::R_X86_64_PC32) {
      Value.Addend += support::ulittle32_t::ref(
          computePlaceholderAddress(SectionID, Offset));
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    } else if (RelType == ELF::R_X86_64_PC64) {
      Value.Addend += support::ulittle64_t::ref(
          computePlaceholderAddress(SectionID, Offset));
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    } else {
      processSimpleRelocation(SectionID, Offset, RelType, Value);
    }
  } else {
    if (Arch == Triple::x86) {
      Value.Addend += support::ulittle32_t::ref(
          computePlaceholderAddress(SectionID, Offset));
    }
    processSimpleRelocation(SectionID, Offset, RelType, Value);
  }
  return ++RelI;
}

size_t RuntimeDyldELF::getGOTEntrySize() {
  // We don't use the GOT in all of these cases, but it's essentially free
  // to put them all here.
  size_t Result = 0;
  switch (Arch) {
  case Triple::x86_64:
  case Triple::aarch64:
  case Triple::aarch64_be:
  case Triple::ppc64:
  case Triple::ppc64le:
  case Triple::systemz:
    Result = sizeof(uint64_t);
    break;
  case Triple::x86:
  case Triple::arm:
  case Triple::thumb:
    Result = sizeof(uint32_t);
    break;
  case Triple::mips:
  case Triple::mipsel:
  case Triple::mips64:
  case Triple::mips64el:
    if (IsMipsO32ABI || IsMipsN32ABI)
      Result = sizeof(uint32_t);
    else if (IsMipsN64ABI)
      Result = sizeof(uint64_t);
    else
      llvm_unreachable("Mips ABI not handled");
    break;
  default:
    llvm_unreachable("Unsupported CPU type!");
  }
  return Result;
}

uint64_t RuntimeDyldELF::allocateGOTEntries(unsigned SectionID, unsigned no) {
  // The GOT section is the same for all sections in the object file.
  (void)SectionID;
  if (GOTSectionID == 0) {
    GOTSectionID = Sections.size();
    // Reserve a section id. We'll allocate the section later
    // once we know the total size.
    Sections.push_back(SectionEntry(".got", nullptr, 0, 0, 0));
  }
  uint64_t StartOffset = CurrentGOTIndex * getGOTEntrySize();
  CurrentGOTIndex += no;
  return StartOffset;
}

void RuntimeDyldELF::resolveGOTOffsetRelocation(unsigned SectionID,
                                                uint64_t Offset,
                                                uint64_t GOTOffset) {
  // Fill in the relative address of the GOT entry into the stub.
  RelocationEntry GOTRE(SectionID, Offset, ELF::R_X86_64_PC32, GOTOffset);
  addRelocationForSection(GOTRE, GOTSectionID);
}

RelocationEntry RuntimeDyldELF::computeGOTOffsetRE(unsigned SectionID,
                                                   uint64_t GOTOffset,
                                                   uint64_t SymbolOffset,
                                                   uint32_t Type) {
  // The GOT section is the same for all sections in the object file.
  (void)SectionID;
  return RelocationEntry(GOTSectionID, GOTOffset, Type, SymbolOffset);
}

Error RuntimeDyldELF::finalizeLoad(const ObjectFile &Obj,
                                   ObjSectionToIDMap &SectionMap) {
  if (IsMipsO32ABI)
    if (!PendingRelocs.empty())
      return make_error<RuntimeDyldError>("Can't find matching LO16 reloc");

  // If necessary, allocate the global offset table.
  if (GOTSectionID != 0) {
    // Allocate memory for the section.
    size_t TotalSize = CurrentGOTIndex * getGOTEntrySize();
    uint8_t *Addr = MemMgr.allocateDataSection(TotalSize, getGOTEntrySize(),
                                               GOTSectionID, ".got", false);
    if (!Addr)
      return make_error<RuntimeDyldError>("Unable to allocate memory for GOT!");

    Sections[GOTSectionID] =
        SectionEntry(".got", Addr, TotalSize, TotalSize, 0);

    if (Checker)
      Checker->registerSection(Obj.getFileName(), GOTSectionID);

    // For now, initialize all GOT entries to zero. We'll fill them in as
    // needed when GOT-based relocations are applied.
    memset(Addr, 0, TotalSize);
    if (IsMipsN32ABI || IsMipsN64ABI) {
      // To correctly resolve Mips GOT relocations, we need a mapping from
      // the object's sections to GOTs.
      for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
           SI != SE; ++SI) {
        if (SI->relocation_begin() != SI->relocation_end()) {
          section_iterator RelocatedSection = SI->getRelocatedSection();
          ObjSectionToIDMap::iterator i = SectionMap.find(*RelocatedSection);
          assert(i != SectionMap.end());
          SectionToGOTMap[i->second] = GOTSectionID;
        }
      }
      GOTSymbolOffsets.clear();
    }
  }

  // Look for and record the EH frame section.
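  // Only the first .eh_frame section is recorded here; it is registered
  // with the memory manager later, in registerEHFrames().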
  ObjSectionToIDMap::iterator i, e;
  for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
    const SectionRef &Section = i->first;
    StringRef Name;
    Section.getName(Name);
    if (Name == ".eh_frame") {
      UnregisteredEHFrameSections.push_back(i->second);
      break;
    }
  }

  GOTSectionID = 0;
  CurrentGOTIndex = 0;

  return Error::success();
}

bool RuntimeDyldELF::isCompatibleFile(const object::ObjectFile &Obj) const {
  return Obj.isELF();
}

bool RuntimeDyldELF::relocationNeedsStub(const RelocationRef &R) const {
  if (Arch != Triple::x86_64)
    return true; // Conservative answer

  switch (R.getType()) {
  default:
    return true; // Conservative answer

  case ELF::R_X86_64_GOTPCREL:
  case ELF::R_X86_64_GOTPCRELX:
  case ELF::R_X86_64_REX_GOTPCRELX:
  case ELF::R_X86_64_PC32:
  case ELF::R_X86_64_PC64:
  case ELF::R_X86_64_64:
    // We know that these relocation types won't need a stub function. This
    // list can be extended as needed.
    return false;
  }
}

} // namespace llvm