1 //===- InputSection.cpp ---------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "InputSection.h"
10 #include "Config.h"
11 #include "InputFiles.h"
12 #include "OutputSections.h"
13 #include "Relocations.h"
14 #include "SymbolTable.h"
15 #include "Symbols.h"
16 #include "SyntheticSections.h"
17 #include "Target.h"
18 #include "lld/Common/CommonLinkerContext.h"
19 #include "llvm/Support/Compiler.h"
20 #include "llvm/Support/Compression.h"
21 #include "llvm/Support/Endian.h"
22 #include "llvm/Support/xxhash.h"
23 #include <algorithm>
24 #include <mutex>
25 #include <vector>
26 
27 using namespace llvm;
28 using namespace llvm::ELF;
29 using namespace llvm::object;
30 using namespace llvm::support;
31 using namespace llvm::support::endian;
32 using namespace llvm::sys;
33 using namespace lld;
34 using namespace lld::elf;
35 
36 SmallVector<InputSectionBase *, 0> elf::inputSections;
37 DenseSet<std::pair<const Symbol *, uint64_t>> elf::ppc64noTocRelax;
38 
39 // Returns a string to construct an error message.
40 std::string lld::toString(const InputSectionBase *sec) {
41   return (toString(sec->file) + ":(" + sec->name + ")").str();
42 }
43 
44 template <class ELFT>
45 static ArrayRef<uint8_t> getSectionContents(ObjFile<ELFT> &file,
46                                             const typename ELFT::Shdr &hdr) {
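  // SHT_NOBITS sections (e.g. .bss) occupy no space in the file; represent
  // them as a null-data ArrayRef carrying only the size.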
47   if (hdr.sh_type == SHT_NOBITS)
48     return makeArrayRef<uint8_t>(nullptr, hdr.sh_size);
49   return check(file.getObj().getSectionContents(hdr));
50 }
51 
52 InputSectionBase::InputSectionBase(InputFile *file, uint64_t flags,
53                                    uint32_t type, uint64_t entsize,
54                                    uint32_t link, uint32_t info,
55                                    uint32_t alignment, ArrayRef<uint8_t> data,
56                                    StringRef name, Kind sectionKind)
57     : SectionBase(sectionKind, name, flags, entsize, alignment, type, info,
58                   link),
59       file(file), rawData(data) {
60   // In order to reduce memory allocation, we assume that mergeable
61   // sections are smaller than 4 GiB, which is not an unreasonable
62   // assumption as of 2017.
63   if (sectionKind == SectionBase::Merge && rawData.size() > UINT32_MAX)
64     error(toString(this) + ": section too large");
65 
66   // The ELF spec states that a value of 0 means the section has
67   // no alignment constraints.
68   uint32_t v = std::max<uint32_t>(alignment, 1);
69   if (!isPowerOf2_64(v))
70     fatal(toString(this) + ": sh_addralign is not a power of 2");
71   this->alignment = v;
72 
  // In ELF, each section can be compressed by zlib, and if compressed,
  // the section name may be mangled by inserting "z" after the leading dot
  // (e.g. ".zdebug_info"). If that's the case, demangle the section name so
  // that we can handle the section as if it weren't compressed.
77   if ((flags & SHF_COMPRESSED) || name.startswith(".zdebug")) {
78     if (!zlib::isAvailable())
79       error(toString(file) + ": contains a compressed section, " +
80             "but zlib is not available");
81     invokeELFT(parseCompressedHeader);
82   }
83 }
84 
// Drop the SHF_GROUP bit unless we are producing a relocatable object file.
// SHF_GROUP is a marker that a section belongs to some comdat group.
// That flag doesn't make sense in an executable.
88 static uint64_t getFlags(uint64_t flags) {
89   flags &= ~(uint64_t)SHF_INFO_LINK;
90   if (!config->relocatable)
91     flags &= ~(uint64_t)SHF_GROUP;
92   return flags;
93 }
94 
95 template <class ELFT>
96 InputSectionBase::InputSectionBase(ObjFile<ELFT> &file,
97                                    const typename ELFT::Shdr &hdr,
98                                    StringRef name, Kind sectionKind)
99     : InputSectionBase(&file, getFlags(hdr.sh_flags), hdr.sh_type,
100                        hdr.sh_entsize, hdr.sh_link, hdr.sh_info,
101                        hdr.sh_addralign, getSectionContents(file, hdr), name,
102                        sectionKind) {
  // We reject object files having insanely large alignments even though
  // they are allowed by the spec. I think 4 GiB is a reasonable limit.
  // We might want to relax this in the future.
106   if (hdr.sh_addralign > UINT32_MAX)
107     fatal(toString(&file) + ": section sh_addralign is too large");
108 }
109 
110 size_t InputSectionBase::getSize() const {
111   if (auto *s = dyn_cast<SyntheticSection>(this))
112     return s->getSize();
113   if (uncompressedSize >= 0)
114     return uncompressedSize;
115   return rawData.size() - bytesDropped;
116 }
117 
118 void InputSectionBase::uncompress() const {
119   size_t size = uncompressedSize;
120   char *uncompressedBuf;
121   {
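    // bAlloc() is not thread-safe and uncompress() may be called concurrently,
    // so serialize the allocation.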
122     static std::mutex mu;
123     std::lock_guard<std::mutex> lock(mu);
124     uncompressedBuf = bAlloc().Allocate<char>(size);
125   }
126 
127   if (Error e = zlib::uncompress(toStringRef(rawData), uncompressedBuf, size))
128     fatal(toString(this) +
129           ": uncompress failed: " + llvm::toString(std::move(e)));
130   rawData = makeArrayRef((uint8_t *)uncompressedBuf, size);
131   uncompressedSize = -1;
132 }
133 
134 template <class ELFT> RelsOrRelas<ELFT> InputSectionBase::relsOrRelas() const {
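  // A relSecIdx of 0 means this section has no associated relocation section.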
135   if (relSecIdx == 0)
136     return {};
137   RelsOrRelas<ELFT> ret;
138   typename ELFT::Shdr shdr =
139       cast<ELFFileBase>(file)->getELFShdrs<ELFT>()[relSecIdx];
140   if (shdr.sh_type == SHT_REL) {
141     ret.rels = makeArrayRef(reinterpret_cast<const typename ELFT::Rel *>(
142                                 file->mb.getBufferStart() + shdr.sh_offset),
143                             shdr.sh_size / sizeof(typename ELFT::Rel));
144   } else {
145     assert(shdr.sh_type == SHT_RELA);
146     ret.relas = makeArrayRef(reinterpret_cast<const typename ELFT::Rela *>(
147                                  file->mb.getBufferStart() + shdr.sh_offset),
148                              shdr.sh_size / sizeof(typename ELFT::Rela));
149   }
150   return ret;
151 }
152 
153 uint64_t SectionBase::getOffset(uint64_t offset) const {
154   switch (kind()) {
155   case Output: {
156     auto *os = cast<OutputSection>(this);
157     // For output sections we treat offset -1 as the end of the section.
158     return offset == uint64_t(-1) ? os->size : offset;
159   }
160   case Regular:
161   case Synthetic:
162     return cast<InputSection>(this)->outSecOff + offset;
163   case EHFrame:
164     // The file crtbeginT.o has relocations pointing to the start of an empty
165     // .eh_frame that is known to be the first in the link. It does that to
166     // identify the start of the output .eh_frame.
167     return offset;
168   case Merge:
169     const MergeInputSection *ms = cast<MergeInputSection>(this);
170     if (InputSection *isec = ms->getParent())
171       return isec->outSecOff + ms->getParentOffset(offset);
172     return ms->getParentOffset(offset);
173   }
174   llvm_unreachable("invalid section kind");
175 }
176 
177 uint64_t SectionBase::getVA(uint64_t offset) const {
178   const OutputSection *out = getOutputSection();
179   return (out ? out->addr : 0) + getOffset(offset);
180 }
181 
182 OutputSection *SectionBase::getOutputSection() {
183   InputSection *sec;
184   if (auto *isec = dyn_cast<InputSection>(this))
185     sec = isec;
186   else if (auto *ms = dyn_cast<MergeInputSection>(this))
187     sec = ms->getParent();
188   else if (auto *eh = dyn_cast<EhInputSection>(this))
189     sec = eh->getParent();
190   else
191     return cast<OutputSection>(this);
192   return sec ? sec->getParent() : nullptr;
193 }
194 
// When a section is compressed, `rawData` consists of a header followed
// by zlib-compressed data. This function parses the header to initialize
// the `uncompressedSize` member and removes the header from `rawData`.
198 template <typename ELFT> void InputSectionBase::parseCompressedHeader() {
199   // Old-style header
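  // A .zdebug* section starts with the 4-byte magic "ZLIB" followed by the
  // uncompressed size as a 64-bit big-endian integer; the zlib stream follows.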
200   if (!(flags & SHF_COMPRESSED)) {
201     assert(name.startswith(".zdebug"));
202     if (!toStringRef(rawData).startswith("ZLIB")) {
203       error(toString(this) + ": corrupted compressed section header");
204       return;
205     }
206     rawData = rawData.slice(4);
207 
208     if (rawData.size() < 8) {
209       error(toString(this) + ": corrupted compressed section header");
210       return;
211     }
212 
213     uncompressedSize = read64be(rawData.data());
214     rawData = rawData.slice(8);
215 
216     // Restore the original section name.
217     // (e.g. ".zdebug_info" -> ".debug_info")
218     name = saver().save("." + name.substr(2));
219     return;
220   }
221 
222   flags &= ~(uint64_t)SHF_COMPRESSED;
223 
224   // New-style header
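  // With SHF_COMPRESSED, the section data starts with an Elf_Chdr (ch_type,
  // ch_size, ch_addralign); the compressed stream follows it.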
225   if (rawData.size() < sizeof(typename ELFT::Chdr)) {
226     error(toString(this) + ": corrupted compressed section");
227     return;
228   }
229 
230   auto *hdr = reinterpret_cast<const typename ELFT::Chdr *>(rawData.data());
231   if (hdr->ch_type != ELFCOMPRESS_ZLIB) {
232     error(toString(this) + ": unsupported compression type");
233     return;
234   }
235 
236   uncompressedSize = hdr->ch_size;
237   alignment = std::max<uint32_t>(hdr->ch_addralign, 1);
238   rawData = rawData.slice(sizeof(*hdr));
239 }
240 
241 InputSection *InputSectionBase::getLinkOrderDep() const {
242   assert(flags & SHF_LINK_ORDER);
243   if (!link)
244     return nullptr;
245   return cast<InputSection>(file->getSections()[link]);
246 }
247 
248 // Find a function symbol that encloses a given location.
249 Defined *InputSectionBase::getEnclosingFunction(uint64_t offset) {
250   for (Symbol *b : file->getSymbols())
251     if (Defined *d = dyn_cast<Defined>(b))
252       if (d->section == this && d->type == STT_FUNC && d->value <= offset &&
253           offset < d->value + d->size)
254         return d;
255   return nullptr;
256 }
257 
258 // Returns an object file location string. Used to construct an error message.
259 std::string InputSectionBase::getLocation(uint64_t offset) {
260   std::string secAndOffset =
261       (name + "+0x" + Twine::utohexstr(offset) + ")").str();
262 
  // We don't have a file for synthetic sections.
264   if (file == nullptr)
265     return (config->outputFile + ":(" + secAndOffset).str();
266 
267   std::string filename = toString(file);
268   if (Defined *d = getEnclosingFunction(offset))
269     return filename + ":(function " + toString(*d) + ": " + secAndOffset;
270 
271   return filename + ":(" + secAndOffset;
272 }
273 
274 // This function is intended to be used for constructing an error message.
275 // The returned message looks like this:
276 //
277 //   foo.c:42 (/home/alice/possibly/very/long/path/foo.c:42)
278 //
279 //  Returns an empty string if there's no way to get line info.
280 std::string InputSectionBase::getSrcMsg(const Symbol &sym, uint64_t offset) {
281   return file->getSrcMsg(sym, *this, offset);
282 }
283 
284 // Returns a filename string along with an optional section name. This
285 // function is intended to be used for constructing an error
286 // message. The returned message looks like this:
287 //
288 //   path/to/foo.o:(function bar)
289 //
290 // or
291 //
292 //   path/to/foo.o:(function bar) in archive path/to/bar.a
293 std::string InputSectionBase::getObjMsg(uint64_t off) {
294   std::string filename = std::string(file->getName());
295 
296   std::string archive;
297   if (!file->archiveName.empty())
298     archive = (" in archive " + file->archiveName).str();
299 
300   // Find a symbol that encloses a given location.
301   for (Symbol *b : file->getSymbols())
302     if (auto *d = dyn_cast<Defined>(b))
303       if (d->section == this && d->value <= off && off < d->value + d->size)
304         return filename + ":(" + toString(*d) + ")" + archive;
305 
306   // If there's no symbol, print out the offset in the section.
307   return (filename + ":(" + name + "+0x" + utohexstr(off) + ")" + archive)
308       .str();
309 }
310 
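// A sentinel InputSection used for sections dropped from the output, e.g.
// members of a discarded COMDAT group.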
311 InputSection InputSection::discarded(nullptr, 0, 0, 0, ArrayRef<uint8_t>(), "");
312 
313 InputSection::InputSection(InputFile *f, uint64_t flags, uint32_t type,
314                            uint32_t alignment, ArrayRef<uint8_t> data,
315                            StringRef name, Kind k)
316     : InputSectionBase(f, flags, type,
317                        /*Entsize*/ 0, /*Link*/ 0, /*Info*/ 0, alignment, data,
318                        name, k) {}
319 
320 template <class ELFT>
321 InputSection::InputSection(ObjFile<ELFT> &f, const typename ELFT::Shdr &header,
322                            StringRef name)
323     : InputSectionBase(f, header, name, InputSectionBase::Regular) {}
324 
325 bool InputSection::classof(const SectionBase *s) {
326   return s->kind() == SectionBase::Regular ||
327          s->kind() == SectionBase::Synthetic;
328 }
329 
330 OutputSection *InputSection::getParent() const {
331   return cast_or_null<OutputSection>(parent);
332 }
333 
334 // Copy SHT_GROUP section contents. Used only for the -r option.
335 template <class ELFT> void InputSection::copyShtGroup(uint8_t *buf) {
336   // ELFT::Word is the 32-bit integral type in the target endianness.
337   using u32 = typename ELFT::Word;
338   ArrayRef<u32> from = getDataAs<u32>();
339   auto *to = reinterpret_cast<u32 *>(buf);
340 
341   // The first entry is not a section number but a flag.
342   *to++ = from[0];
343 
  // Adjust section numbers because section numbers in an input object file
  // differ from those in the output. We also need to handle combined or
  // discarded members.
347   ArrayRef<InputSectionBase *> sections = file->getSections();
348   DenseSet<uint32_t> seen;
349   for (uint32_t idx : from.slice(1)) {
350     OutputSection *osec = sections[idx]->getOutputSection();
351     if (osec && seen.insert(osec->sectionIndex).second)
352       *to++ = osec->sectionIndex;
353   }
354 }
355 
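// For a relocation section (SHT_REL/SHT_RELA), sh_info holds the index of the
// section the relocations apply to.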
356 InputSectionBase *InputSection::getRelocatedSection() const {
357   if (!file || (type != SHT_RELA && type != SHT_REL))
358     return nullptr;
359   ArrayRef<InputSectionBase *> sections = file->getSections();
360   return sections[info];
361 }
362 
// This is used for -r and --emit-relocs. We can't use memcpy to copy
// relocations because we need to update the symbol index and r_offset of
// each relocation. So we copy relocations one by one.
366 template <class ELFT, class RelTy>
367 void InputSection::copyRelocations(uint8_t *buf, ArrayRef<RelTy> rels) {
368   const TargetInfo &target = *elf::target;
369   InputSectionBase *sec = getRelocatedSection();
370 
371   for (const RelTy &rel : rels) {
372     RelType type = rel.getType(config->isMips64EL);
373     const ObjFile<ELFT> *file = getFile<ELFT>();
374     Symbol &sym = file->getRelocTargetSym(rel);
375 
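    // p is the output relocation written in place. buf advances by
    // sizeof(RelTy), so for REL inputs no r_addend field is emitted.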
376     auto *p = reinterpret_cast<typename ELFT::Rela *>(buf);
377     buf += sizeof(RelTy);
378 
379     if (RelTy::IsRela)
380       p->r_addend = getAddend<ELFT>(rel);
381 
382     // Output section VA is zero for -r, so r_offset is an offset within the
383     // section, but for --emit-relocs it is a virtual address.
384     p->r_offset = sec->getVA(rel.r_offset);
385     p->setSymbolAndType(in.symTab->getSymbolIndex(&sym), type,
386                         config->isMips64EL);
387 
388     if (sym.type == STT_SECTION) {
389       // We combine multiple section symbols into only one per
390       // section. This means we have to update the addend. That is
391       // trivial for Elf_Rela, but for Elf_Rel we have to write to the
392       // section data. We do that by adding to the Relocation vector.
393 
394       // .eh_frame is horribly special and can reference discarded sections. To
395       // avoid having to parse and recreate .eh_frame, we just replace any
396       // relocation in it pointing to discarded sections with R_*_NONE, which
397       // hopefully creates a frame that is ignored at runtime. Also, don't warn
398       // on .gcc_except_table and debug sections.
399       //
400       // See the comment in maybeReportUndefined for PPC32 .got2 and PPC64 .toc
401       auto *d = dyn_cast<Defined>(&sym);
402       if (!d) {
403         if (!isDebugSection(*sec) && sec->name != ".eh_frame" &&
404             sec->name != ".gcc_except_table" && sec->name != ".got2" &&
405             sec->name != ".toc") {
406           uint32_t secIdx = cast<Undefined>(sym).discardedSecIdx;
407           Elf_Shdr_Impl<ELFT> sec = file->template getELFShdrs<ELFT>()[secIdx];
408           warn("relocation refers to a discarded section: " +
409                CHECK(file->getObj().getSectionName(sec), file) +
410                "\n>>> referenced by " + getObjMsg(p->r_offset));
411         }
412         p->setSymbolAndType(0, 0, false);
413         continue;
414       }
415       SectionBase *section = d->section;
416       if (!section->isLive()) {
417         p->setSymbolAndType(0, 0, false);
418         continue;
419       }
420 
421       int64_t addend = getAddend<ELFT>(rel);
422       const uint8_t *bufLoc = sec->data().begin() + rel.r_offset;
423       if (!RelTy::IsRela)
424         addend = target.getImplicitAddend(bufLoc, type);
425 
426       if (config->emachine == EM_MIPS &&
427           target.getRelExpr(type, sym, bufLoc) == R_MIPS_GOTREL) {
        // Some MIPS relocations depend on the "gp" value. By default,
        // this value has a 0x7ff0 offset from the .got section. But
        // relocatable files produced by a compiler or a linker
        // might redefine this default value, and we must use it
        // for the calculation of the relocation result. When we
        // generate an EXE or DSO it's trivial. Generating relocatable
        // output is a more difficult case because the linker does
        // not calculate relocations in this mode and loses the
        // individual "gp" values used by each input object file.
        // As a workaround we add the "gp" value to the relocation
        // addend and save it back to the file.
439         addend += sec->getFile<ELFT>()->mipsGp0;
440       }
441 
442       if (RelTy::IsRela)
443         p->r_addend = sym.getVA(addend) - section->getOutputSection()->addr;
444       else if (config->relocatable && type != target.noneRel)
445         sec->relocations.push_back({R_ABS, type, rel.r_offset, addend, &sym});
446     } else if (config->emachine == EM_PPC && type == R_PPC_PLTREL24 &&
447                p->r_addend >= 0x8000 && sec->file->ppc32Got2) {
      // Similar to R_MIPS_GPREL{16,32}. If the addend of R_PPC_PLTREL24
      // indicates that r30 is relative to the input section .got2
      // (r_addend>=0x8000), then after linking r30 should be relative to the
      // output section .got2. To compensate for the shift, adjust r_addend by
      // ppc32Got2->outSecOff.
453       p->r_addend += sec->file->ppc32Got2->outSecOff;
454     }
455   }
456 }
457 
// The ARM and AArch64 ABIs handle pc-relative relocations to undefined weak
// references specially. The general rule is that the value of the symbol in
// this context is the address of the place P. A further special case is that
// branch relocations to an undefined weak reference resolve to the next
// instruction.
463 static uint32_t getARMUndefinedRelativeWeakVA(RelType type, uint32_t a,
464                                               uint32_t p) {
465   switch (type) {
  // Unresolved branch relocations to weak references resolve to the next
  // instruction; this will be either 2 or 4 bytes on from P.
468   case R_ARM_THM_JUMP8:
469   case R_ARM_THM_JUMP11:
470     return p + 2 + a;
471   case R_ARM_CALL:
472   case R_ARM_JUMP24:
473   case R_ARM_PC24:
474   case R_ARM_PLT32:
475   case R_ARM_PREL31:
476   case R_ARM_THM_JUMP19:
477   case R_ARM_THM_JUMP24:
478     return p + 4 + a;
479   case R_ARM_THM_CALL:
    // We don't want an interworking BLX to ARM; keep bit 0 set so the branch
    // stays in Thumb state.
481     return p + 5 + a;
  // Unresolved non-branch pc-relative relocations.
  // R_ARM_TARGET2, which can be resolved relatively, is not listed here
  // because it never targets a weak reference.
485   case R_ARM_MOVW_PREL_NC:
486   case R_ARM_MOVT_PREL:
487   case R_ARM_REL32:
488   case R_ARM_THM_ALU_PREL_11_0:
489   case R_ARM_THM_MOVW_PREL_NC:
490   case R_ARM_THM_MOVT_PREL:
491   case R_ARM_THM_PC12:
492     return p + a;
493   // p + a is unrepresentable as negative immediates can't be encoded.
494   case R_ARM_THM_PC8:
495     return p;
496   }
497   llvm_unreachable("ARM pc-relative relocation expected\n");
498 }
499 
500 // The comment above getARMUndefinedRelativeWeakVA applies to this function.
501 static uint64_t getAArch64UndefinedRelativeWeakVA(uint64_t type, uint64_t p) {
502   switch (type) {
  // Unresolved branch relocations to weak references resolve to the next
  // instruction; this is 4 bytes on from P.
505   case R_AARCH64_CALL26:
506   case R_AARCH64_CONDBR19:
507   case R_AARCH64_JUMP26:
508   case R_AARCH64_TSTBR14:
509     return p + 4;
  // Unresolved non-branch pc-relative relocations
511   case R_AARCH64_PREL16:
512   case R_AARCH64_PREL32:
513   case R_AARCH64_PREL64:
514   case R_AARCH64_ADR_PREL_LO21:
515   case R_AARCH64_LD_PREL_LO19:
516   case R_AARCH64_PLT32:
517     return p;
518   }
519   llvm_unreachable("AArch64 pc-relative relocation expected\n");
520 }
521 
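// On RISC-V, branch relocations to an undefined weak reference resolve to the
// place itself (creating a safe infinite loop); other relocation types resolve
// to zero plus any addend applied by the caller.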
522 static uint64_t getRISCVUndefinedRelativeWeakVA(uint64_t type, uint64_t p) {
523   switch (type) {
524   case R_RISCV_BRANCH:
525   case R_RISCV_JAL:
526   case R_RISCV_CALL:
527   case R_RISCV_CALL_PLT:
528   case R_RISCV_RVC_BRANCH:
529   case R_RISCV_RVC_JUMP:
530     return p;
531   default:
532     return 0;
533   }
534 }
535 
// ARM SBREL relocations are of the form S + A - B, where B is the static base.
// The ARM ABI defines the base to be the "addressing origin of the output
// segment defining the symbol S". We define the "addressing origin"/static
// base to be the base of the PT_LOAD segment containing the symbol.
// The procedure call standard only defines a Read-Write Position Independent
// (RWPI) variant, so in practice we should expect the static base to be the
// base of the RW segment.
543 static uint64_t getARMStaticBase(const Symbol &sym) {
544   OutputSection *os = sym.getOutputSection();
545   if (!os || !os->ptLoad || !os->ptLoad->firstSec)
546     fatal("SBREL relocation to " + sym.getName() + " without static base");
547   return os->ptLoad->firstSec->addr;
548 }
549 
// For R_RISCV_PC_INDIRECT (R_RISCV_PCREL_LO12_{I,S}), the symbol actually
// points to the corresponding R_RISCV_PCREL_HI20 relocation, and the target VA
// is calculated using the PCREL_HI20 relocation's symbol.
553 //
554 // This function returns the R_RISCV_PCREL_HI20 relocation from
555 // R_RISCV_PCREL_LO12's symbol and addend.
556 static Relocation *getRISCVPCRelHi20(const Symbol *sym, uint64_t addend) {
557   const Defined *d = cast<Defined>(sym);
558   if (!d->section) {
559     error("R_RISCV_PCREL_LO12 relocation points to an absolute symbol: " +
560           sym->getName());
561     return nullptr;
562   }
563   InputSection *isec = cast<InputSection>(d->section);
564 
565   if (addend != 0)
566     warn("non-zero addend in R_RISCV_PCREL_LO12 relocation to " +
567          isec->getObjMsg(d->value) + " is ignored");
568 
569   // Relocations are sorted by offset, so we can use std::equal_range to do
570   // binary search.
571   Relocation r;
572   r.offset = d->value;
573   auto range =
574       std::equal_range(isec->relocations.begin(), isec->relocations.end(), r,
575                        [](const Relocation &lhs, const Relocation &rhs) {
576                          return lhs.offset < rhs.offset;
577                        });
578 
579   for (auto it = range.first; it != range.second; ++it)
580     if (it->type == R_RISCV_PCREL_HI20 || it->type == R_RISCV_GOT_HI20 ||
581         it->type == R_RISCV_TLS_GD_HI20 || it->type == R_RISCV_TLS_GOT_HI20)
582       return &*it;
583 
584   error("R_RISCV_PCREL_LO12 relocation points to " + isec->getObjMsg(d->value) +
585         " without an associated R_RISCV_PCREL_HI20 relocation");
586   return nullptr;
587 }
588 
589 // A TLS symbol's virtual address is relative to the TLS segment. Add a
590 // target-specific adjustment to produce a thread-pointer-relative offset.
591 static int64_t getTlsTpOffset(const Symbol &s) {
592   // On targets that support TLSDESC, _TLS_MODULE_BASE_@tpoff = 0.
593   if (&s == ElfSym::tlsModuleBase)
594     return 0;
595 
596   // There are 2 TLS layouts. Among targets we support, x86 uses TLS Variant 2
597   // while most others use Variant 1. At run time TP will be aligned to p_align.
598 
599   // Variant 1. TP will be followed by an optional gap (which is the size of 2
600   // pointers on ARM/AArch64, 0 on other targets), followed by alignment
601   // padding, then the static TLS blocks. The alignment padding is added so that
602   // (TP + gap + padding) is congruent to p_vaddr modulo p_align.
603   //
604   // Variant 2. Static TLS blocks, followed by alignment padding are placed
605   // before TP. The alignment padding is added so that (TP - padding -
606   // p_memsz) is congruent to p_vaddr modulo p_align.
607   PhdrEntry *tls = Out::tlsPhdr;
608   switch (config->emachine) {
609     // Variant 1.
610   case EM_ARM:
611   case EM_AARCH64:
612     return s.getVA(0) + config->wordsize * 2 +
613            ((tls->p_vaddr - config->wordsize * 2) & (tls->p_align - 1));
614   case EM_MIPS:
615   case EM_PPC:
616   case EM_PPC64:
617     // Adjusted Variant 1. TP is placed with a displacement of 0x7000, which is
618     // to allow a signed 16-bit offset to reach 0x1000 of TCB/thread-library
619     // data and 0xf000 of the program's TLS segment.
620     return s.getVA(0) + (tls->p_vaddr & (tls->p_align - 1)) - 0x7000;
621   case EM_RISCV:
622     return s.getVA(0) + (tls->p_vaddr & (tls->p_align - 1));
623 
624     // Variant 2.
625   case EM_HEXAGON:
626   case EM_SPARCV9:
627   case EM_386:
628   case EM_X86_64:
629     return s.getVA(0) - tls->p_memsz -
630            ((-tls->p_vaddr - tls->p_memsz) & (tls->p_align - 1));
631   default:
632     llvm_unreachable("unhandled Config->EMachine");
633   }
634 }
635 
636 uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
637                                             int64_t a, uint64_t p,
638                                             const Symbol &sym, RelExpr expr) {
639   switch (expr) {
640   case R_ABS:
641   case R_DTPREL:
642   case R_RELAX_TLS_LD_TO_LE_ABS:
643   case R_RELAX_GOT_PC_NOPIC:
644   case R_RISCV_ADD:
645     return sym.getVA(a);
646   case R_ADDEND:
647     return a;
648   case R_ARM_SBREL:
649     return sym.getVA(a) - getARMStaticBase(sym);
650   case R_GOT:
651   case R_RELAX_TLS_GD_TO_IE_ABS:
652     return sym.getGotVA() + a;
653   case R_GOTONLY_PC:
654     return in.got->getVA() + a - p;
655   case R_GOTPLTONLY_PC:
656     return in.gotPlt->getVA() + a - p;
657   case R_GOTREL:
658   case R_PPC64_RELAX_TOC:
659     return sym.getVA(a) - in.got->getVA();
660   case R_GOTPLTREL:
661     return sym.getVA(a) - in.gotPlt->getVA();
662   case R_GOTPLT:
663   case R_RELAX_TLS_GD_TO_IE_GOTPLT:
664     return sym.getGotVA() + a - in.gotPlt->getVA();
665   case R_TLSLD_GOT_OFF:
666   case R_GOT_OFF:
667   case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
668     return sym.getGotOffset() + a;
669   case R_AARCH64_GOT_PAGE_PC:
670   case R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC:
671     return getAArch64Page(sym.getGotVA() + a) - getAArch64Page(p);
672   case R_AARCH64_GOT_PAGE:
673     return sym.getGotVA() + a - getAArch64Page(in.got->getVA());
674   case R_GOT_PC:
675   case R_RELAX_TLS_GD_TO_IE:
676     return sym.getGotVA() + a - p;
677   case R_MIPS_GOTREL:
678     return sym.getVA(a) - in.mipsGot->getGp(file);
679   case R_MIPS_GOT_GP:
680     return in.mipsGot->getGp(file) + a;
681   case R_MIPS_GOT_GP_PC: {
    // An R_MIPS_LO16 expression has the R_MIPS_GOT_GP_PC type iff the target
    // is the _gp_disp symbol. In that case we should use the following
    // formula for the calculation: "AHL + GP - P + 4". For details see p. 4-19
    // at ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
    // The microMIPS variants of these relocations use slightly different
    // expressions: AHL + GP - P + 3 for %lo() and AHL + GP - P - 1 for %hi()
    // to correctly handle the least-significant bit of the microMIPS symbol.
689     uint64_t v = in.mipsGot->getGp(file) + a - p;
690     if (type == R_MIPS_LO16 || type == R_MICROMIPS_LO16)
691       v += 4;
692     if (type == R_MICROMIPS_LO16 || type == R_MICROMIPS_HI16)
693       v -= 1;
694     return v;
695   }
696   case R_MIPS_GOT_LOCAL_PAGE:
    // If a relocation against a MIPS local symbol requires a GOT entry, that
    // entry should be initialized with the 'page address', which is the high
    // 16 bits of the sum of the symbol's value and the addend.
700     return in.mipsGot->getVA() + in.mipsGot->getPageEntryOffset(file, sym, a) -
701            in.mipsGot->getGp(file);
702   case R_MIPS_GOT_OFF:
703   case R_MIPS_GOT_OFF32:
    // On MIPS, if a GOT relocation has a non-zero addend, the addend should be
    // applied to the GOT entry contents, not to the GOT entry offset.
    // That is why we use a separate expression type.
707     return in.mipsGot->getVA() + in.mipsGot->getSymEntryOffset(file, sym, a) -
708            in.mipsGot->getGp(file);
709   case R_MIPS_TLSGD:
710     return in.mipsGot->getVA() + in.mipsGot->getGlobalDynOffset(file, sym) -
711            in.mipsGot->getGp(file);
712   case R_MIPS_TLSLD:
713     return in.mipsGot->getVA() + in.mipsGot->getTlsIndexOffset(file) -
714            in.mipsGot->getGp(file);
715   case R_AARCH64_PAGE_PC: {
716     uint64_t val = sym.isUndefWeak() ? p + a : sym.getVA(a);
717     return getAArch64Page(val) - getAArch64Page(p);
718   }
719   case R_RISCV_PC_INDIRECT: {
720     if (const Relocation *hiRel = getRISCVPCRelHi20(&sym, a))
721       return getRelocTargetVA(file, hiRel->type, hiRel->addend, sym.getVA(),
722                               *hiRel->sym, hiRel->expr);
723     return 0;
724   }
725   case R_PC:
726   case R_ARM_PCA: {
727     uint64_t dest;
728     if (expr == R_ARM_PCA)
729       // Some PC relative ARM (Thumb) relocations align down the place.
730       p = p & 0xfffffffc;
731     if (sym.isUndefWeak()) {
      // On ARM and AArch64 a branch to an undefined weak resolves to the next
      // instruction, otherwise the place. On RISC-V, resolve an undefined weak
      // to the same instruction to cause an infinite loop (making the user
      // aware of the issue) while ensuring no overflow.
736       if (config->emachine == EM_ARM)
737         dest = getARMUndefinedRelativeWeakVA(type, a, p);
738       else if (config->emachine == EM_AARCH64)
739         dest = getAArch64UndefinedRelativeWeakVA(type, p) + a;
740       else if (config->emachine == EM_PPC)
741         dest = p;
742       else if (config->emachine == EM_RISCV)
743         dest = getRISCVUndefinedRelativeWeakVA(type, p) + a;
744       else
745         dest = sym.getVA(a);
746     } else {
747       dest = sym.getVA(a);
748     }
749     return dest - p;
750   }
751   case R_PLT:
752     return sym.getPltVA() + a;
753   case R_PLT_PC:
754   case R_PPC64_CALL_PLT:
755     return sym.getPltVA() + a - p;
756   case R_PLT_GOTPLT:
757     return sym.getPltVA() + a - in.gotPlt->getVA();
758   case R_PPC32_PLTREL:
759     // R_PPC_PLTREL24 uses the addend (usually 0 or 0x8000) to indicate r30
760     // stores _GLOBAL_OFFSET_TABLE_ or .got2+0x8000. The addend is ignored for
761     // target VA computation.
762     return sym.getPltVA() - p;
763   case R_PPC64_CALL: {
764     uint64_t symVA = sym.getVA(a);
765     // If we have an undefined weak symbol, we might get here with a symbol
766     // address of zero. That could overflow, but the code must be unreachable,
767     // so don't bother doing anything at all.
768     if (!symVA)
769       return 0;
770 
771     // PPC64 V2 ABI describes two entry points to a function. The global entry
772     // point is used for calls where the caller and callee (may) have different
773     // TOC base pointers and r2 needs to be modified to hold the TOC base for
774     // the callee. For local calls the caller and callee share the same
775     // TOC base and so the TOC pointer initialization code should be skipped by
776     // branching to the local entry point.
777     return symVA - p + getPPC64GlobalEntryToLocalEntryOffset(sym.stOther);
778   }
779   case R_PPC64_TOCBASE:
780     return getPPC64TocBase() + a;
781   case R_RELAX_GOT_PC:
782   case R_PPC64_RELAX_GOT_PC:
783     return sym.getVA(a) - p;
784   case R_RELAX_TLS_GD_TO_LE:
785   case R_RELAX_TLS_IE_TO_LE:
786   case R_RELAX_TLS_LD_TO_LE:
787   case R_TPREL:
788     // It is not very clear what to return if the symbol is undefined. With
789     // --noinhibit-exec, even a non-weak undefined reference may reach here.
790     // Just return A, which matches R_ABS, and the behavior of some dynamic
791     // loaders.
792     if (sym.isUndefined())
793       return a;
794     return getTlsTpOffset(sym) + a;
795   case R_RELAX_TLS_GD_TO_LE_NEG:
796   case R_TPREL_NEG:
797     if (sym.isUndefined())
798       return a;
799     return -getTlsTpOffset(sym) + a;
800   case R_SIZE:
801     return sym.getSize() + a;
802   case R_TLSDESC:
803     return in.got->getTlsDescAddr(sym) + a;
804   case R_TLSDESC_PC:
805     return in.got->getTlsDescAddr(sym) + a - p;
806   case R_TLSDESC_GOTPLT:
807     return in.got->getTlsDescAddr(sym) + a - in.gotPlt->getVA();
808   case R_AARCH64_TLSDESC_PAGE:
809     return getAArch64Page(in.got->getTlsDescAddr(sym) + a) - getAArch64Page(p);
810   case R_TLSGD_GOT:
811     return in.got->getGlobalDynOffset(sym) + a;
812   case R_TLSGD_GOTPLT:
813     return in.got->getGlobalDynAddr(sym) + a - in.gotPlt->getVA();
814   case R_TLSGD_PC:
815     return in.got->getGlobalDynAddr(sym) + a - p;
816   case R_TLSLD_GOTPLT:
817     return in.got->getVA() + in.got->getTlsIndexOff() + a - in.gotPlt->getVA();
818   case R_TLSLD_GOT:
819     return in.got->getTlsIndexOff() + a;
820   case R_TLSLD_PC:
821     return in.got->getTlsIndexVA() + a - p;
822   default:
823     llvm_unreachable("invalid expression");
824   }
825 }
826 
// This function applies relocations to sections without the SHF_ALLOC bit.
// Such sections are never mapped to memory at runtime. Debug sections are
// an example. Relocations in non-alloc sections are much easier to
// handle than in allocated sections because they never need complex
// treatment such as GOT or PLT (because at runtime nothing refers to them).
// So, we handle relocations for non-alloc sections directly in this
// function as a performance optimization.
834 template <class ELFT, class RelTy>
835 void InputSection::relocateNonAlloc(uint8_t *buf, ArrayRef<RelTy> rels) {
836   const unsigned bits = sizeof(typename ELFT::uint) * 8;
837   const TargetInfo &target = *elf::target;
838   const bool isDebug = isDebugSection(*this);
839   const bool isDebugLocOrRanges =
840       isDebug && (name == ".debug_loc" || name == ".debug_ranges");
841   const bool isDebugLine = isDebug && name == ".debug_line";
842   Optional<uint64_t> tombstone;
843   for (const auto &patAndValue : llvm::reverse(config->deadRelocInNonAlloc))
844     if (patAndValue.first.match(this->name)) {
845       tombstone = patAndValue.second;
846       break;
847     }
848 
849   for (const RelTy &rel : rels) {
850     RelType type = rel.getType(config->isMips64EL);
851 
    // GCC 8.0 and earlier have a bug in which they emit R_386_GOTPC relocations
    // against _GLOBAL_OFFSET_TABLE_ for .debug_info. The bug was fixed
    // in 2017 (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82630), but we
    // need to keep this bug-compatible code for a while.
856     if (config->emachine == EM_386 && type == R_386_GOTPC)
857       continue;
858 
859     uint64_t offset = rel.r_offset;
860     uint8_t *bufLoc = buf + offset;
861     int64_t addend = getAddend<ELFT>(rel);
862     if (!RelTy::IsRela)
863       addend += target.getImplicitAddend(bufLoc, type);
864 
865     Symbol &sym = getFile<ELFT>()->getRelocTargetSym(rel);
866     RelExpr expr = target.getRelExpr(type, sym, bufLoc);
867     if (expr == R_NONE)
868       continue;
869 
870     if (tombstone ||
871         (isDebug && (type == target.symbolicRel || expr == R_DTPREL))) {
      // Resolve relocations in .debug_* referencing discarded symbols or ICF
      // folded section symbols to a tombstone value. Resolving to the addend is
      // unsatisfactory because the resulting address range may collide with a
      // valid low-address range, or leave multiple CUs claiming ownership of
      // the same range of code, which may confuse consumers.
877       //
878       // To address the problems, we use -1 as a tombstone value for most
879       // .debug_* sections. We have to ignore the addend because we don't want
880       // to resolve an address attribute (which may have a non-zero addend) to
881       // -1+addend (wrap around to a low address).
882       //
883       // R_DTPREL type relocations represent an offset into the dynamic thread
884       // vector. The computed value is st_value plus a non-negative offset.
885       // Negative values are invalid, so -1 can be used as the tombstone value.
886       //
887       // If the referenced symbol is discarded (made Undefined), or the
888       // section defining the referenced symbol is garbage collected,
889       // sym.getOutputSection() is nullptr. `ds->folded` catches the ICF folded
890       // case. However, resolving a relocation in .debug_line to -1 would stop
891       // debugger users from setting breakpoints on the folded-in function, so
892       // exclude .debug_line.
893       //
894       // For pre-DWARF-v5 .debug_loc and .debug_ranges, -1 is a reserved value
895       // (base address selection entry), use 1 (which is used by GNU ld for
896       // .debug_ranges).
897       //
898       // TODO To reduce disruption, we use 0 instead of -1 as the tombstone
899       // value. Enable -1 in a future release.
900       auto *ds = dyn_cast<Defined>(&sym);
901       if (!sym.getOutputSection() || (ds && ds->folded && !isDebugLine)) {
902         // If -z dead-reloc-in-nonalloc= is specified, respect it.
903         const uint64_t value = tombstone ? SignExtend64<bits>(*tombstone)
904                                          : (isDebugLocOrRanges ? 1 : 0);
905         target.relocateNoSym(bufLoc, type, value);
906         continue;
907       }
908     }
909 
910     // For a relocatable link, only tombstone values are applied.
911     if (config->relocatable)
912       continue;
913 
914     if (expr == R_SIZE) {
915       target.relocateNoSym(bufLoc, type,
916                            SignExtend64<bits>(sym.getSize() + addend));
917       continue;
918     }
919 
920     // R_ABS/R_DTPREL and some other relocations can be used from non-SHF_ALLOC
921     // sections.
922     if (expr == R_ABS || expr == R_DTPREL || expr == R_GOTPLTREL ||
923         expr == R_RISCV_ADD) {
924       target.relocateNoSym(bufLoc, type, SignExtend64<bits>(sym.getVA(addend)));
925       continue;
926     }
927 
928     std::string msg = getLocation(offset) + ": has non-ABS relocation " +
929                       toString(type) + " against symbol '" + toString(sym) +
930                       "'";
931     if (expr != R_PC && expr != R_ARM_PCA) {
932       error(msg);
933       return;
934     }
935 
    // If control reaches here, we found a PC-relative relocation in a
    // non-ALLOC section. Since a non-ALLOC section is not loaded into memory
    // at runtime, the notion of PC-relative doesn't make sense here. So,
    // this is a usage error. However, GNU linkers historically accept such
    // relocations without any errors and relocate them as if they were at
    // address 0. For bug compatibility, we accept them with warnings. We
    // know Steel Bank Common Lisp as of 2018 has this bug.
943     warn(msg);
944     target.relocateNoSym(
945         bufLoc, type,
946         SignExtend64<bits>(sym.getVA(addend - offset - outSecOff)));
947   }
948 }
949 
950 // This is used when '-r' is given.
// For REL targets, InputSection::copyRelocations() may store artificial
// relocations aimed at updating addends. They are handled in relocateAlloc()
953 // for allocatable sections, and this function does the same for
954 // non-allocatable sections, such as sections with debug information.
955 static void relocateNonAllocForRelocatable(InputSection *sec, uint8_t *buf) {
956   const unsigned bits = config->is64 ? 64 : 32;
957 
958   for (const Relocation &rel : sec->relocations) {
959     // InputSection::copyRelocations() adds only R_ABS relocations.
960     assert(rel.expr == R_ABS);
961     uint8_t *bufLoc = buf + rel.offset;
962     uint64_t targetVA = SignExtend64(rel.sym->getVA(rel.addend), bits);
963     target->relocate(bufLoc, rel, targetVA);
964   }
965 }
966 
967 template <class ELFT>
968 void InputSectionBase::relocate(uint8_t *buf, uint8_t *bufEnd) {
969   if ((flags & SHF_EXECINSTR) && LLVM_UNLIKELY(getFile<ELFT>()->splitStack))
970     adjustSplitStackFunctionPrologues<ELFT>(buf, bufEnd);
971 
972   if (flags & SHF_ALLOC) {
973     relocateAlloc(buf, bufEnd);
974     return;
975   }
976 
977   auto *sec = cast<InputSection>(this);
978   if (config->relocatable)
979     relocateNonAllocForRelocatable(sec, buf);
980   // For a relocatable link, also call relocateNonAlloc() to rewrite applicable
981   // locations with tombstone values.
982   const RelsOrRelas<ELFT> rels = sec->template relsOrRelas<ELFT>();
983   if (rels.areRelocsRel())
984     sec->relocateNonAlloc<ELFT>(buf, rels.rels);
985   else
986     sec->relocateNonAlloc<ELFT>(buf, rels.relas);
987 }
988 
989 void InputSectionBase::relocateAlloc(uint8_t *buf, uint8_t *bufEnd) {
990   assert(flags & SHF_ALLOC);
991   const unsigned bits = config->wordsize * 8;
992   const TargetInfo &target = *elf::target;
993   uint64_t lastPPCRelaxedRelocOff = UINT64_C(-1);
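  // Used below to try relaxing adjacent ADRP+LDR / ADRP+ADD relocation pairs
  // when possible.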
994   AArch64Relaxer aarch64relaxer(relocations);
995   for (size_t i = 0, size = relocations.size(); i != size; ++i) {
996     const Relocation &rel = relocations[i];
997     if (rel.expr == R_NONE)
998       continue;
999     uint64_t offset = rel.offset;
1000     uint8_t *bufLoc = buf + offset;
1001 
1002     uint64_t secAddr = getOutputSection()->addr;
1003     if (auto *sec = dyn_cast<InputSection>(this))
1004       secAddr += sec->outSecOff;
1005     const uint64_t addrLoc = secAddr + offset;
1006     const uint64_t targetVA =
1007         SignExtend64(getRelocTargetVA(file, rel.type, rel.addend, addrLoc,
1008                                       *rel.sym, rel.expr),
1009                      bits);
1010     switch (rel.expr) {
1011     case R_RELAX_GOT_PC:
1012     case R_RELAX_GOT_PC_NOPIC:
1013       target.relaxGot(bufLoc, rel, targetVA);
1014       break;
1015     case R_AARCH64_GOT_PAGE_PC:
1016       if (i + 1 < size && aarch64relaxer.tryRelaxAdrpLdr(
1017                               rel, relocations[i + 1], secAddr, buf)) {
1018         ++i;
1019         continue;
1020       }
1021       target.relocate(bufLoc, rel, targetVA);
1022       break;
1023     case R_AARCH64_PAGE_PC:
1024       if (i + 1 < size && aarch64relaxer.tryRelaxAdrpAdd(
1025                               rel, relocations[i + 1], secAddr, buf)) {
1026         ++i;
1027         continue;
1028       }
1029       target.relocate(bufLoc, rel, targetVA);
1030       break;
1031     case R_PPC64_RELAX_GOT_PC: {
1032       // The R_PPC64_PCREL_OPT relocation must appear immediately after
1033       // R_PPC64_GOT_PCREL34 in the relocations table at the same offset.
1034       // We can only relax R_PPC64_PCREL_OPT if we have also relaxed
1035       // the associated R_PPC64_GOT_PCREL34 since only the latter has an
1036       // associated symbol. So save the offset when relaxing R_PPC64_GOT_PCREL34
1037       // and only relax the other if the saved offset matches.
1038       if (rel.type == R_PPC64_GOT_PCREL34)
1039         lastPPCRelaxedRelocOff = offset;
1040       if (rel.type == R_PPC64_PCREL_OPT && offset != lastPPCRelaxedRelocOff)
1041         break;
1042       target.relaxGot(bufLoc, rel, targetVA);
1043       break;
1044     }
1045     case R_PPC64_RELAX_TOC:
1046       // rel.sym refers to the STT_SECTION symbol associated to the .toc input
1047       // section. If an R_PPC64_TOC16_LO (.toc + addend) references the TOC
1048       // entry, there may be R_PPC64_TOC16_HA not paired with
1049       // R_PPC64_TOC16_LO_DS. Don't relax. This loses some relaxation
1050       // opportunities but is safe.
1051       if (ppc64noTocRelax.count({rel.sym, rel.addend}) ||
1052           !tryRelaxPPC64TocIndirection(rel, bufLoc))
1053         target.relocate(bufLoc, rel, targetVA);
1054       break;
1055     case R_RELAX_TLS_IE_TO_LE:
1056       target.relaxTlsIeToLe(bufLoc, rel, targetVA);
1057       break;
1058     case R_RELAX_TLS_LD_TO_LE:
1059     case R_RELAX_TLS_LD_TO_LE_ABS:
1060       target.relaxTlsLdToLe(bufLoc, rel, targetVA);
1061       break;
1062     case R_RELAX_TLS_GD_TO_LE:
1063     case R_RELAX_TLS_GD_TO_LE_NEG:
1064       target.relaxTlsGdToLe(bufLoc, rel, targetVA);
1065       break;
1066     case R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC:
1067     case R_RELAX_TLS_GD_TO_IE:
1068     case R_RELAX_TLS_GD_TO_IE_ABS:
1069     case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
1070     case R_RELAX_TLS_GD_TO_IE_GOTPLT:
1071       target.relaxTlsGdToIe(bufLoc, rel, targetVA);
1072       break;
1073     case R_PPC64_CALL:
1074       // If this is a call to __tls_get_addr, it may be part of a TLS
1075       // sequence that has been relaxed and turned into a nop. In this
1076       // case, we don't want to handle it as a call.
1077       if (read32(bufLoc) == 0x60000000) // nop
1078         break;
1079 
1080       // Patch a nop (0x60000000) to a ld.
1081       if (rel.sym->needsTocRestore) {
1082         // gcc/gfortran 5.4, 6.3 and earlier versions do not add nop for
1083         // recursive calls even if the function is preemptible. This is not
1084         // wrong in the common case where the function is not preempted at
1085         // runtime. Just ignore.
1086         if ((bufLoc + 8 > bufEnd || read32(bufLoc + 4) != 0x60000000) &&
1087             rel.sym->file != file) {
1088           // Use substr(6) to remove the "__plt_" prefix.
1089           errorOrWarn(getErrorLocation(bufLoc) + "call to " +
1090                       lld::toString(*rel.sym).substr(6) +
1091                       " lacks nop, can't restore toc");
1092           break;
1093         }
1094         write32(bufLoc + 4, 0xe8410018); // ld %r2, 24(%r1)
1095       }
1096       target.relocate(bufLoc, rel, targetVA);
1097       break;
1098     default:
1099       target.relocate(bufLoc, rel, targetVA);
1100       break;
1101     }
1102   }
1103 
1104   // Apply jumpInstrMods.  jumpInstrMods are created when the opcode of
1105   // a jmp insn must be modified to shrink the jmp insn or to flip the jmp
1106   // insn.  This is primarily used to relax and optimize jumps created with
1107   // basic block sections.
1108   if (jumpInstrMod) {
1109     target.applyJumpInstrMod(buf + jumpInstrMod->offset, jumpInstrMod->original,
1110                              jumpInstrMod->size);
1111   }
1112 }
1113 
1114 // For each function-defining prologue, find any calls to __morestack,
1115 // and replace them with calls to __morestack_non_split.
1116 static void switchMorestackCallsToMorestackNonSplit(
1117     DenseSet<Defined *> &prologues,
1118     SmallVector<Relocation *, 0> &morestackCalls) {
1119 
1120   // If the target adjusted a function's prologue, all calls to
1121   // __morestack inside that function should be switched to
1122   // __morestack_non_split.
1123   Symbol *moreStackNonSplit = symtab->find("__morestack_non_split");
1124   if (!moreStackNonSplit) {
1125     error("mixing split-stack objects requires a definition of "
1126           "__morestack_non_split");
1127     return;
1128   }
1129 
1130   // Sort both collections to compare addresses efficiently.
1131   llvm::sort(morestackCalls, [](const Relocation *l, const Relocation *r) {
1132     return l->offset < r->offset;
1133   });
1134   std::vector<Defined *> functions(prologues.begin(), prologues.end());
1135   llvm::sort(functions, [](const Defined *l, const Defined *r) {
1136     return l->value < r->value;
1137   });
1138 
1139   auto it = morestackCalls.begin();
1140   for (Defined *f : functions) {
1141     // Find the first call to __morestack within the function.
1142     while (it != morestackCalls.end() && (*it)->offset < f->value)
1143       ++it;
1144     // Adjust all calls inside the function.
1145     while (it != morestackCalls.end() && (*it)->offset < f->value + f->size) {
1146       (*it)->sym = moreStackNonSplit;
1147       ++it;
1148     }
1149   }
1150 }
1151 
1152 static bool enclosingPrologueAttempted(uint64_t offset,
1153                                        const DenseSet<Defined *> &prologues) {
1154   for (Defined *f : prologues)
1155     if (f->value <= offset && offset < f->value + f->size)
1156       return true;
1157   return false;
1158 }
1159 
1160 // If a function compiled for split stack calls a function not
1161 // compiled for split stack, then the caller needs its prologue
1162 // adjusted to ensure that the called function will have enough stack
1163 // available. Find those functions, and adjust their prologues.
1164 template <class ELFT>
1165 void InputSectionBase::adjustSplitStackFunctionPrologues(uint8_t *buf,
1166                                                          uint8_t *end) {
1167   DenseSet<Defined *> prologues;
1168   SmallVector<Relocation *, 0> morestackCalls;
1169 
1170   for (Relocation &rel : relocations) {
1171     // Ignore calls into the split-stack api.
1172     if (rel.sym->getName().startswith("__morestack")) {
1173       if (rel.sym->getName().equals("__morestack"))
1174         morestackCalls.push_back(&rel);
1175       continue;
1176     }
1177 
    // A relocation to a non-function symbol isn't relevant. Sometimes
    // __morestack is not marked as a function, so this check comes
    // after the name check.
1181     if (rel.sym->type != STT_FUNC)
1182       continue;
1183 
    // If the callee's file was compiled with split stack, there is nothing to
    // do. In this context, a "Defined" symbol is one "defined by the binary
    // currently being produced". So an "undefined" symbol might be provided by
    // a shared library. It is not possible to tell how such symbols were
    // compiled, so be conservative.
1189     if (Defined *d = dyn_cast<Defined>(rel.sym))
1190       if (InputSection *isec = cast_or_null<InputSection>(d->section))
1191         if (!isec || !isec->getFile<ELFT>() || isec->getFile<ELFT>()->splitStack)
1192           continue;
1193 
1194     if (enclosingPrologueAttempted(rel.offset, prologues))
1195       continue;
1196 
1197     if (Defined *f = getEnclosingFunction(rel.offset)) {
1198       prologues.insert(f);
1199       if (target->adjustPrologueForCrossSplitStack(buf + f->value, end,
1200                                                    f->stOther))
1201         continue;
1202       if (!getFile<ELFT>()->someNoSplitStack)
1203         error(lld::toString(this) + ": " + f->getName() +
1204               " (with -fsplit-stack) calls " + rel.sym->getName() +
1205               " (without -fsplit-stack), but couldn't adjust its prologue");
1206     }
1207   }
1208 
1209   if (target->needsMoreStackNonSplit)
1210     switchMorestackCallsToMorestackNonSplit(prologues, morestackCalls);
1211 }
1212 
1213 template <class ELFT> void InputSection::writeTo(uint8_t *buf) {
1214   if (auto *s = dyn_cast<SyntheticSection>(this)) {
1215     s->writeTo(buf);
1216     return;
1217   }
1218 
1219   if (LLVM_UNLIKELY(type == SHT_NOBITS))
1220     return;
1221   // If -r or --emit-relocs is given, then an InputSection
1222   // may be a relocation section.
1223   if (LLVM_UNLIKELY(type == SHT_RELA)) {
1224     copyRelocations<ELFT>(buf, getDataAs<typename ELFT::Rela>());
1225     return;
1226   }
1227   if (LLVM_UNLIKELY(type == SHT_REL)) {
1228     copyRelocations<ELFT>(buf, getDataAs<typename ELFT::Rel>());
1229     return;
1230   }
1231 
1232   // If -r is given, we may have a SHT_GROUP section.
1233   if (LLVM_UNLIKELY(type == SHT_GROUP)) {
1234     copyShtGroup<ELFT>(buf);
1235     return;
1236   }
1237 
1238   // If this is a compressed section, uncompress section contents directly
1239   // to the buffer.
1240   if (uncompressedSize >= 0) {
1241     size_t size = uncompressedSize;
1242     if (Error e = zlib::uncompress(toStringRef(rawData), (char *)buf, size))
1243       fatal(toString(this) +
1244             ": uncompress failed: " + llvm::toString(std::move(e)));
1245     uint8_t *bufEnd = buf + size;
1246     relocate<ELFT>(buf, bufEnd);
1247     return;
1248   }
1249 
1250   // Copy section contents from source object file to output file
1251   // and then apply relocations.
1252   memcpy(buf, rawData.data(), rawData.size());
1253   relocate<ELFT>(buf, buf + rawData.size());
1254 }
1255 
1256 void InputSection::replace(InputSection *other) {
1257   alignment = std::max(alignment, other->alignment);
1258 
1259   // When a section is replaced with another section that was allocated to
1260   // another partition, the replacement section (and its associated sections)
1261   // need to be placed in the main partition so that both partitions will be
1262   // able to access it.
1263   if (partition != other->partition) {
1264     partition = 1;
1265     for (InputSection *isec : dependentSections)
1266       isec->partition = 1;
1267   }
1268 
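  // Redirect references to `other` to this section via its repl pointer and
  // mark it dead so it is not emitted on its own.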
1269   other->repl = repl;
1270   other->markDead();
1271 }
1272 
1273 template <class ELFT>
1274 EhInputSection::EhInputSection(ObjFile<ELFT> &f,
1275                                const typename ELFT::Shdr &header,
1276                                StringRef name)
1277     : InputSectionBase(f, header, name, InputSectionBase::EHFrame) {}
1278 
1279 SyntheticSection *EhInputSection::getParent() const {
1280   return cast_or_null<SyntheticSection>(parent);
1281 }
1282 
// Returns the index of the first relocation that points to a region between
// begin and begin+size, or -1 if there is none.
1285 template <class IntTy, class RelTy>
1286 static unsigned getReloc(IntTy begin, IntTy size, const ArrayRef<RelTy> &rels,
1287                          unsigned &relocI) {
  // Start the search from relocI for fast access. That works because the
  // relocations are sorted in .eh_frame.
1290   for (unsigned n = rels.size(); relocI < n; ++relocI) {
1291     const RelTy &rel = rels[relocI];
1292     if (rel.r_offset < begin)
1293       continue;
1294 
1295     if (rel.r_offset < begin + size)
1296       return relocI;
1297     return -1;
1298   }
1299   return -1;
1300 }
1301 
1302 // .eh_frame is a sequence of CIE or FDE records.
1303 // This function splits an input section into records and returns them.
1304 template <class ELFT> void EhInputSection::split() {
1305   const RelsOrRelas<ELFT> rels = relsOrRelas<ELFT>();
1306   // getReloc expects the relocations to be sorted by r_offset. See the comment
1307   // in scanRelocs.
1308   if (rels.areRelocsRel()) {
1309     SmallVector<typename ELFT::Rel, 0> storage;
1310     split<ELFT>(sortRels(rels.rels, storage));
1311   } else {
1312     SmallVector<typename ELFT::Rela, 0> storage;
1313     split<ELFT>(sortRels(rels.relas, storage));
1314   }
1315 }
1316 
1317 template <class ELFT, class RelTy>
1318 void EhInputSection::split(ArrayRef<RelTy> rels) {
1319   ArrayRef<uint8_t> d = rawData;
1320   const char *msg = nullptr;
1321   unsigned relI = 0;
1322   while (!d.empty()) {
1323     if (d.size() < 4) {
1324       msg = "CIE/FDE too small";
1325       break;
1326     }
1327     uint64_t size = endian::read32<ELFT::TargetEndianness>(d.data());
1328     // If it is 0xFFFFFFFF, the next 8 bytes contain the size instead,
1329     // but we do not support that format yet.
1330     if (size == UINT32_MAX) {
1331       msg = "CIE/FDE too large";
1332       break;
1333     }
1334     size += 4;
1335     if (size > d.size()) {
1336       msg = "CIE/FDE ends past the end of the section";
1337       break;
1338     }
1339 
1340     uint64_t off = d.data() - rawData.data();
1341     pieces.emplace_back(off, this, size, getReloc(off, size, rels, relI));
1342     d = d.slice(size);
1343   }
1344   if (msg)
1345     errorOrWarn("corrupted .eh_frame: " + Twine(msg) + "\n>>> defined in " +
1346                 getObjMsg(d.data() - rawData.data()));
1347 }
1348 
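// Returns the offset of the first entSize-byte entry in s that consists
// entirely of null bytes. The caller must guarantee that such an entry
// exists.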
1349 static size_t findNull(StringRef s, size_t entSize) {
1350   for (unsigned i = 0, n = s.size(); i != n; i += entSize) {
1351     const char *b = s.begin() + i;
1352     if (std::all_of(b, b + entSize, [](char c) { return c == 0; }))
1353       return i;
1354   }
  llvm_unreachable("string is not null terminated");
1356 }
1357 
1358 SyntheticSection *MergeInputSection::getParent() const {
1359   return cast_or_null<SyntheticSection>(parent);
1360 }
1361 
// Split a SHF_STRINGS section. Such a section is a sequence of
// null-terminated strings.
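//
// For example, with entSize == 1 the contents "foo\0barbaz\0" are split into
// the two pieces "foo\0" and "barbaz\0". Each piece is hashed so that
// identical pieces coming from different object files can be deduplicated
// when the output merge section is built.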
1364 void MergeInputSection::splitStrings(StringRef s, size_t entSize) {
1365   const bool live = !(flags & SHF_ALLOC) || !config->gcSections;
1366   const char *p = s.data(), *end = s.data() + s.size();
1367   if (!std::all_of(end - entSize, end, [](char c) { return c == 0; }))
1368     fatal(toString(this) + ": string is not null terminated");
1369   if (entSize == 1) {
1370     // Optimize the common case.
1371     do {
1372       size_t size = strlen(p) + 1;
1373       pieces.emplace_back(p - s.begin(), xxHash64(StringRef(p, size)), live);
1374       p += size;
1375     } while (p != end);
1376   } else {
1377     do {
1378       size_t size = findNull(StringRef(p, end - p), entSize) + entSize;
1379       pieces.emplace_back(p - s.begin(), xxHash64(StringRef(p, size)), live);
1380       p += size;
1381     } while (p != end);
1382   }
1383 }
1384 
// Split a non-SHF_STRINGS section. Such a section is a sequence of
// fixed-size records.
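//
// For example, a 24-byte section with entSize == 8 (e.g. a .rodata.cst8
// section holding three 8-byte constants) is split into three 8-byte pieces.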
1387 void MergeInputSection::splitNonStrings(ArrayRef<uint8_t> data,
1388                                         size_t entSize) {
1389   size_t size = data.size();
1390   assert((size % entSize) == 0);
1391   const bool live = !(flags & SHF_ALLOC) || !config->gcSections;
1392 
1393   pieces.resize_for_overwrite(size / entSize);
1394   for (size_t i = 0, j = 0; i != size; i += entSize, j++)
1395     pieces[j] = {i, (uint32_t)xxHash64(data.slice(i, entSize)), live};
1396 }
1397 
1398 template <class ELFT>
1399 MergeInputSection::MergeInputSection(ObjFile<ELFT> &f,
1400                                      const typename ELFT::Shdr &header,
1401                                      StringRef name)
1402     : InputSectionBase(f, header, name, InputSectionBase::Merge) {}
1403 
1404 MergeInputSection::MergeInputSection(uint64_t flags, uint32_t type,
1405                                      uint64_t entsize, ArrayRef<uint8_t> data,
1406                                      StringRef name)
    : InputSectionBase(nullptr, flags, type, entsize, /*link=*/0, /*info=*/0,
                       /*alignment=*/entsize, data, name, SectionBase::Merge) {}
1409 
// This function is called after we obtain a complete list of input sections
// that need to be linked. It is responsible for splitting section contents
// into small chunks for further processing.
1413 //
1414 // Note that this function is called from parallelForEach. This must be
1415 // thread-safe (i.e. no memory allocation from the pools).
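//
// Each piece records only its input offset, a hash of its contents, and a
// liveness bit; the actual deduplication happens later, when the parent
// synthetic section is finalized.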
1416 void MergeInputSection::splitIntoPieces() {
1417   assert(pieces.empty());
1418 
1419   if (flags & SHF_STRINGS)
1420     splitStrings(toStringRef(data()), entsize);
1421   else
1422     splitNonStrings(data(), entsize);
1423 }
1424 
1425 SectionPiece *MergeInputSection::getSectionPiece(uint64_t offset) {
1426   if (this->data().size() <= offset)
1427     fatal(toString(this) + ": offset is outside the section");
1428 
  // The pieces are sorted by inputOff, so do a binary search: partition_point
  // returns the first piece whose inputOff is greater than offset, and the
  // piece just before it is the one that contains offset.
1431   auto it = partition_point(
1432       pieces, [=](SectionPiece p) { return p.inputOff <= offset; });
1433   return &it[-1];
1434 }
1435 
// Returns the offset in an output section for a given input offset.
// Because the contents of a mergeable section are not necessarily contiguous
// in the output, the result is not simply a base output offset plus the
// input offset.
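//
// For example, if the piece containing the input offset starts at input
// offset 0x20 and was assigned output offset 0x8, an input offset of 0x24
// maps to output offset 0xc.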
1439 uint64_t MergeInputSection::getParentOffset(uint64_t offset) const {
  // Find the piece that contains the offset, then add the offset within the
  // piece to the piece's output offset.
1442   const SectionPiece &piece = *getSectionPiece(offset);
1443   uint64_t addend = offset - piece.inputOff;
1444   return piece.outputOff + addend;
1445 }
1446 
1447 template InputSection::InputSection(ObjFile<ELF32LE> &, const ELF32LE::Shdr &,
1448                                     StringRef);
1449 template InputSection::InputSection(ObjFile<ELF32BE> &, const ELF32BE::Shdr &,
1450                                     StringRef);
1451 template InputSection::InputSection(ObjFile<ELF64LE> &, const ELF64LE::Shdr &,
1452                                     StringRef);
1453 template InputSection::InputSection(ObjFile<ELF64BE> &, const ELF64BE::Shdr &,
1454                                     StringRef);
1455 
1456 template void InputSection::writeTo<ELF32LE>(uint8_t *);
1457 template void InputSection::writeTo<ELF32BE>(uint8_t *);
1458 template void InputSection::writeTo<ELF64LE>(uint8_t *);
1459 template void InputSection::writeTo<ELF64BE>(uint8_t *);
1460 
1461 template RelsOrRelas<ELF32LE> InputSectionBase::relsOrRelas<ELF32LE>() const;
1462 template RelsOrRelas<ELF32BE> InputSectionBase::relsOrRelas<ELF32BE>() const;
1463 template RelsOrRelas<ELF64LE> InputSectionBase::relsOrRelas<ELF64LE>() const;
1464 template RelsOrRelas<ELF64BE> InputSectionBase::relsOrRelas<ELF64BE>() const;
1465 
1466 template MergeInputSection::MergeInputSection(ObjFile<ELF32LE> &,
1467                                               const ELF32LE::Shdr &, StringRef);
1468 template MergeInputSection::MergeInputSection(ObjFile<ELF32BE> &,
1469                                               const ELF32BE::Shdr &, StringRef);
1470 template MergeInputSection::MergeInputSection(ObjFile<ELF64LE> &,
1471                                               const ELF64LE::Shdr &, StringRef);
1472 template MergeInputSection::MergeInputSection(ObjFile<ELF64BE> &,
1473                                               const ELF64BE::Shdr &, StringRef);
1474 
1475 template EhInputSection::EhInputSection(ObjFile<ELF32LE> &,
1476                                         const ELF32LE::Shdr &, StringRef);
1477 template EhInputSection::EhInputSection(ObjFile<ELF32BE> &,
1478                                         const ELF32BE::Shdr &, StringRef);
1479 template EhInputSection::EhInputSection(ObjFile<ELF64LE> &,
1480                                         const ELF64LE::Shdr &, StringRef);
1481 template EhInputSection::EhInputSection(ObjFile<ELF64BE> &,
1482                                         const ELF64BE::Shdr &, StringRef);
1483 
1484 template void EhInputSection::split<ELF32LE>();
1485 template void EhInputSection::split<ELF32BE>();
1486 template void EhInputSection::split<ELF64LE>();
1487 template void EhInputSection::split<ELF64BE>();
1488