1 //===- ConcatOutputSection.cpp --------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ConcatOutputSection.h"
10 #include "Config.h"
11 #include "OutputSegment.h"
12 #include "SymbolTable.h"
13 #include "Symbols.h"
14 #include "SyntheticSections.h"
15 #include "Target.h"
16 #include "lld/Common/ErrorHandler.h"
17 #include "lld/Common/Memory.h"
18 #include "llvm/BinaryFormat/MachO.h"
19 #include "llvm/Support/ScopedPrinter.h"
20 #include "llvm/Support/TimeProfiler.h"
21 
22 using namespace llvm;
23 using namespace llvm::MachO;
24 using namespace lld;
25 using namespace lld::macho;
26 
// Global registry mapping (segment name, section name) pairs to their output
// section. MapVector preserves insertion order; populated lazily by
// ConcatOutputSection::getOrCreateForInput().
MapVector<NamePair, ConcatOutputSection *> macho::concatOutputSections;
28 
29 void ConcatOutputSection::addInput(ConcatInputSection *input) {
30   assert(input->parent == this);
31   if (inputs.empty()) {
32     align = input->align;
33     flags = input->getFlags();
34   } else {
35     align = std::max(align, input->align);
36     finalizeFlags(input);
37   }
38   inputs.push_back(input);
39 }
40 
41 // Branch-range extension can be implemented in two ways, either through ...
42 //
43 // (1) Branch islands: Single branch instructions (also of limited range),
44 //     that might be chained in multiple hops to reach the desired
45 //     destination. On ARM64, as 16 branch islands are needed to hop between
46 //     opposite ends of a 2 GiB program. LD64 uses branch islands exclusively,
47 //     even when it needs excessive hops.
48 //
49 // (2) Thunks: Instruction(s) to load the destination address into a scratch
50 //     register, followed by a register-indirect branch. Thunks are
51 //     constructed to reach any arbitrary address, so need not be
52 //     chained. Although thunks need not be chained, a program might need
53 //     multiple thunks to the same destination distributed throughout a large
54 //     program so that all call sites can have one within range.
55 //
// The optimal approach is to mix islands for destinations within two hops,
// and use thunks for destinations at greater distance. For now, we only
// implement thunks. TODO: Add support for branch islands.
59 //
60 // Internally -- as expressed in LLD's data structures -- a
61 // branch-range-extension thunk comprises ...
62 //
63 // (1) new Defined privateExtern symbol for the thunk named
64 //     <FUNCTION>.thunk.<SEQUENCE>, which references ...
65 // (2) new InputSection, which contains ...
66 // (3.1) new data for the instructions to load & branch to the far address +
67 // (3.2) new Relocs on instructions to load the far address, which reference ...
68 // (4.1) existing Defined extern symbol for the real function in __text, or
69 // (4.2) existing DylibSymbol for the real function in a dylib
70 //
71 // Nearly-optimal thunk-placement algorithm features:
72 //
73 // * Single pass: O(n) on the number of call sites.
74 //
75 // * Accounts for the exact space overhead of thunks - no heuristics
76 //
77 // * Exploits the full range of call instructions - forward & backward
78 //
79 // Data:
80 //
81 // * DenseMap<Symbol *, ThunkInfo> thunkMap: Maps the function symbol
82 //   to its thunk bookkeeper.
83 //
84 // * struct ThunkInfo (bookkeeper): Call instructions have limited range, and
85 //   distant call sites might be unable to reach the same thunk, so multiple
86 //   thunks are necessary to serve all call sites in a very large program. A
87 //   thunkInfo stores state for all thunks associated with a particular
88 //   function: (a) thunk symbol, (b) input section containing stub code, and
89 //   (c) sequence number for the active thunk incarnation. When an old thunk
90 //   goes out of range, we increment the sequence number and create a new
91 //   thunk named <FUNCTION>.thunk.<SEQUENCE>.
92 //
93 // * A thunk incarnation comprises (a) private-extern Defined symbol pointing
94 //   to (b) an InputSection holding machine instructions (similar to a MachO
95 //   stub), and (c) Reloc(s) that reference the real function for fixing-up
96 //   the stub code.
97 //
// * std::vector<InputSection *> ConcatOutputSection::thunks: A vector parallel
99 //   to the inputs vector. We store new thunks via cheap vector append, rather
100 //   than costly insertion into the inputs vector.
101 //
102 // Control Flow:
103 //
// * During address assignment, ConcatOutputSection::finalize() examines call
105 //   sites by ascending address and creates thunks.  When a function is beyond
106 //   the range of a call site, we need a thunk. Place it at the largest
107 //   available forward address from the call site. Call sites increase
108 //   monotonically and thunks are always placed as far forward as possible;
109 //   thus, we place thunks at monotonically increasing addresses. Once a thunk
110 //   is placed, it and all previous input-section addresses are final.
111 //
// * ConcatOutputSection::finalize() and ConcatOutputSection::writeTo() merge
113 //   the inputs and thunks vectors (both ordered by ascending address), which
114 //   is simple and cheap.
115 
// Global bookkeeping for branch-range-extension thunks: maps each branch
// target symbol to its ThunkInfo. Pre-populated (call-site counts) by
// ConcatOutputSection::needsThunks(); consumed by finalize() and
// estimateStubsInRangeVA().
DenseMap<Symbol *, ThunkInfo> lld::macho::thunkMap;
117 
118 // Determine whether we need thunks, which depends on the target arch -- RISC
119 // (i.e., ARM) generally does because it has limited-range branch/call
120 // instructions, whereas CISC (i.e., x86) generally doesn't. RISC only needs
121 // thunks for programs so large that branch source & destination addresses
122 // might differ more than the range of branch instruction(s).
123 bool ConcatOutputSection::needsThunks() const {
124   if (!target->usesThunks())
125     return false;
126   uint64_t isecAddr = addr;
127   for (InputSection *isec : inputs)
128     isecAddr = alignTo(isecAddr, isec->align) + isec->getSize();
129   if (isecAddr - addr + in.stubs->getSize() <=
130       std::min(target->backwardBranchRange, target->forwardBranchRange))
131     return false;
132   // Yes, this program is large enough to need thunks.
133   for (InputSection *isec : inputs) {
134     for (Reloc &r : isec->relocs) {
135       if (!target->hasAttr(r.type, RelocAttrBits::BRANCH))
136         continue;
137       auto *sym = r.referent.get<Symbol *>();
138       // Pre-populate the thunkMap and memoize call site counts for every
139       // InputSection and ThunkInfo. We do this for the benefit of
140       // ConcatOutputSection::estimateStubsInRangeVA()
141       ThunkInfo &thunkInfo = thunkMap[sym];
142       // Knowing ThunkInfo call site count will help us know whether or not we
143       // might need to create more for this referent at the time we are
144       // estimating distance to __stubs in .
145       ++thunkInfo.callSiteCount;
146       // Knowing InputSection call site count will help us avoid work on those
147       // that have no BRANCH relocs.
148       ++isec->callSiteCount;
149     }
150   }
151   return true;
152 }
153 
154 // Since __stubs is placed after __text, we must estimate the address
155 // beyond which stubs are within range of a simple forward branch.
156 uint64_t ConcatOutputSection::estimateStubsInRangeVA(size_t callIdx) const {
157   size_t endIdx = inputs.size();
158   ConcatInputSection *isec = inputs[callIdx];
159   uint64_t isecVA = isec->getVA();
160   // Tally the non-stub functions which still have call sites
161   // remaining to process, which yields the maximum number
162   // of thunks we might yet place.
163   size_t maxPotentialThunks = 0;
164   for (auto &tp : thunkMap) {
165     ThunkInfo &ti = tp.second;
166     maxPotentialThunks +=
167         !tp.first->isInStubs() && ti.callSitesUsed < ti.callSiteCount;
168   }
169   // Tally the total size of input sections remaining to process.
170   uint64_t isecEnd = isec->getVA();
171   for (size_t i = callIdx; i < endIdx; i++) {
172     InputSection *isec = inputs[i];
173     isecEnd = alignTo(isecEnd, isec->align) + isec->getSize();
174   }
175   // Estimate the address after which call sites can safely call stubs
176   // directly rather than through intermediary thunks.
177   uint64_t forwardBranchRange = target->forwardBranchRange;
178   uint64_t stubsInRangeVA = isecEnd + maxPotentialThunks * target->thunkSize +
179                             in.stubs->getSize() - forwardBranchRange;
180   log("thunks = " + std::to_string(thunkMap.size()) +
181       ", potential = " + std::to_string(maxPotentialThunks) +
182       ", stubs = " + std::to_string(in.stubs->getSize()) + ", isecVA = " +
183       to_hexString(isecVA) + ", threshold = " + to_hexString(stubsInRangeVA) +
184       ", isecEnd = " + to_hexString(isecEnd) +
185       ", tail = " + to_hexString(isecEnd - isecVA) +
186       ", slop = " + to_hexString(forwardBranchRange - (isecEnd - isecVA)));
187   return stubsInRangeVA;
188 }
189 
// Assign final addresses and file offsets to every input section, creating
// branch-range-extension thunks where a call site cannot reach its target,
// then set this output section's size and fileSize. See the block comment
// above for the nearly-optimal single-pass thunk-placement algorithm.
void ConcatOutputSection::finalize() {
  uint64_t isecAddr = addr;
  uint64_t isecFileOff = fileOff;
  // Assign the next available address & file offset to one input section and
  // mark it final; thereafter its getVA() is stable.
  auto finalizeOne = [&](ConcatInputSection *isec) {
    isecAddr = alignTo(isecAddr, isec->align);
    isecFileOff = alignTo(isecFileOff, isec->align);
    isec->outSecOff = isecAddr - addr;
    isec->isFinal = true;
    isecAddr += isec->getSize();
    isecFileOff += isec->getFileSize();
  };

  // Fast path: no thunks needed (CISC target, or program small enough that
  // every branch is in range) -- lay out all inputs in order and return.
  if (!needsThunks()) {
    for (ConcatInputSection *isec : inputs)
      finalizeOne(isec);
    size = isecAddr - addr;
    fileSize = isecFileOff - fileOff;
    return;
  }

  uint64_t forwardBranchRange = target->forwardBranchRange;
  uint64_t backwardBranchRange = target->backwardBranchRange;
  // Sentinel: stays outOfRangeVA until all inputs have been finalized, at
  // which point estimateStubsInRangeVA() yields the real threshold.
  uint64_t stubsInRangeVA = TargetInfo::outOfRangeVA;
  size_t thunkSize = target->thunkSize;
  // Statistics, reported via log() at the end of this function.
  size_t relocCount = 0;
  size_t callSiteCount = 0;
  size_t thunkCallCount = 0;
  size_t thunkCount = 0;

  // inputs[finalIdx] is for finalization (address-assignment)
  size_t finalIdx = 0;
  // Kick-off by ensuring that the first input section has an address
  for (size_t callIdx = 0, endIdx = inputs.size(); callIdx < endIdx;
       ++callIdx) {
    if (finalIdx == callIdx)
      finalizeOne(inputs[finalIdx++]);
    ConcatInputSection *isec = inputs[callIdx];
    assert(isec->isFinal);
    uint64_t isecVA = isec->getVA();
    // Assign addresses up-to the forward branch-range limit
    while (finalIdx < endIdx && isecAddr + inputs[finalIdx]->getSize() <
                                    isecVA + forwardBranchRange - thunkSize)
      finalizeOne(inputs[finalIdx++]);
    // Skip sections with no BRANCH relocs (counts memoized in needsThunks()).
    if (isec->callSiteCount == 0)
      continue;
    if (finalIdx == endIdx && stubsInRangeVA == TargetInfo::outOfRangeVA) {
      // When we have finalized all input sections, __stubs (destined
      // to follow __text) comes within range of forward branches and
      // we can estimate the threshold address after which we can
      // reach any stub with a forward branch. Note that although it
      // sits in the middle of a loop, this code executes only once.
      // It is in the loop because we need to call it at the proper
      // time: the earliest call site from which the end of __text
      // (and start of __stubs) comes within range of a forward branch.
      stubsInRangeVA = estimateStubsInRangeVA(callIdx);
    }
    // Process relocs by ascending address, i.e., ascending offset within isec
    std::vector<Reloc> &relocs = isec->relocs;
    // FIXME: This property does not hold for object files produced by ld64's
    // `-r` mode.
    assert(is_sorted(relocs,
                     [](Reloc &a, Reloc &b) { return a.offset > b.offset; }));
    for (Reloc &r : reverse(relocs)) {
      ++relocCount;
      if (!target->hasAttr(r.type, RelocAttrBits::BRANCH))
        continue;
      ++callSiteCount;
      // Calculate branch reachability boundaries
      uint64_t callVA = isecVA + r.offset;
      uint64_t lowVA =
          backwardBranchRange < callVA ? callVA - backwardBranchRange : 0;
      uint64_t highVA = callVA + forwardBranchRange;
      // Calculate our call referent address
      auto *funcSym = r.referent.get<Symbol *>();
      ThunkInfo &thunkInfo = thunkMap[funcSym];
      // The referent is not reachable, so we need to use a thunk ...
      if (funcSym->isInStubs() && callVA >= stubsInRangeVA) {
        // ... Oh, wait! We are close enough to the end that __stubs
        // are now within range of a simple forward branch.
        continue;
      }
      uint64_t funcVA = funcSym->resolveBranchVA();
      ++thunkInfo.callSitesUsed;
      if (lowVA <= funcVA && funcVA <= highVA) {
        // The referent is reachable with a simple call instruction.
        continue;
      }
      ++thunkInfo.thunkCallCount;
      ++thunkCallCount;
      // If an existing thunk is reachable, use it ...
      if (thunkInfo.sym) {
        uint64_t thunkVA = thunkInfo.isec->getVA();
        if (lowVA <= thunkVA && thunkVA <= highVA) {
          r.referent = thunkInfo.sym;
          continue;
        }
      }
      // ... otherwise, create a new thunk
      if (isecAddr > highVA) {
        // When there is small-to-no margin between highVA and
        // isecAddr and the distance between subsequent call sites is
        // smaller than thunkSize, then a new thunk can go out of
        // range.  Fix by unfinalizing inputs[finalIdx] to reduce the
        // distance between callVA and highVA, then shift some thunks
        // to occupy address-space formerly occupied by the
        // unfinalized inputs[finalIdx].
        fatal(Twine(__FUNCTION__) + ": FIXME: thunk range overrun");
      }
      // The thunk lives in the same segment/section as the calling section.
      thunkInfo.isec =
          make<ConcatInputSection>(isec->getSegName(), isec->getName());
      thunkInfo.isec->parent = this;

      // This code runs after dead code removal. Need to set the `live` bit
      // on the thunk isec so that asserts that check that only live sections
      // get written are happy.
      thunkInfo.isec->live = true;

      // Name the new incarnation <FUNCTION>.thunk.<SEQUENCE> and register it
      // as a private-extern Defined so it appears in the symbol table.
      StringRef thunkName = saver.save(funcSym->getName() + ".thunk." +
                                       std::to_string(thunkInfo.sequence++));
      r.referent = thunkInfo.sym = symtab->addDefined(
          thunkName, /*file=*/nullptr, thunkInfo.isec, /*value=*/0,
          /*size=*/thunkSize, /*isWeakDef=*/false, /*isPrivateExtern=*/true,
          /*isThumb=*/false, /*isReferencedDynamically=*/false,
          /*noDeadStrip=*/false);
      target->populateThunk(thunkInfo.isec, funcSym);
      // Place the thunk at the next available address (as far forward from
      // the call site as possible) and record it in the parallel vector.
      finalizeOne(thunkInfo.isec);
      thunks.push_back(thunkInfo.isec);
      ++thunkCount;
    }
  }
  size = isecAddr - addr;
  fileSize = isecFileOff - fileOff;

  log("thunks for " + parent->name + "," + name +
      ": funcs = " + std::to_string(thunkMap.size()) +
      ", relocs = " + std::to_string(relocCount) +
      ", all calls = " + std::to_string(callSiteCount) +
      ", thunk calls = " + std::to_string(thunkCallCount) +
      ", thunks = " + std::to_string(thunkCount));
}
330 
331 void ConcatOutputSection::writeTo(uint8_t *buf) const {
332   // Merge input sections from thunk & ordinary vectors
333   size_t i = 0, ie = inputs.size();
334   size_t t = 0, te = thunks.size();
335   while (i < ie || t < te) {
336     while (i < ie && (t == te || inputs[i]->getSize() == 0 ||
337                       inputs[i]->outSecOff < thunks[t]->outSecOff)) {
338       inputs[i]->writeTo(buf + inputs[i]->outSecOff);
339       ++i;
340     }
341     while (t < te && (i == ie || thunks[t]->outSecOff < inputs[i]->outSecOff)) {
342       thunks[t]->writeTo(buf + thunks[t]->outSecOff);
343       ++t;
344     }
345   }
346 }
347 
348 void ConcatOutputSection::finalizeFlags(InputSection *input) {
349   switch (sectionType(input->getFlags())) {
350   default /*type-unspec'ed*/:
351     // FIXME: Add additional logic here when supporting emitting obj files.
352     break;
353   case S_4BYTE_LITERALS:
354   case S_8BYTE_LITERALS:
355   case S_16BYTE_LITERALS:
356   case S_CSTRING_LITERALS:
357   case S_ZEROFILL:
358   case S_LAZY_SYMBOL_POINTERS:
359   case S_MOD_TERM_FUNC_POINTERS:
360   case S_THREAD_LOCAL_REGULAR:
361   case S_THREAD_LOCAL_ZEROFILL:
362   case S_THREAD_LOCAL_VARIABLES:
363   case S_THREAD_LOCAL_INIT_FUNCTION_POINTERS:
364   case S_THREAD_LOCAL_VARIABLE_POINTERS:
365   case S_NON_LAZY_SYMBOL_POINTERS:
366   case S_SYMBOL_STUBS:
367     flags |= input->getFlags();
368     break;
369   }
370 }
371 
372 ConcatOutputSection *
373 ConcatOutputSection::getOrCreateForInput(const InputSection *isec) {
374   NamePair names = maybeRenameSection({isec->getSegName(), isec->getName()});
375   ConcatOutputSection *&osec = concatOutputSections[names];
376   if (!osec)
377     osec = make<ConcatOutputSection>(names.second);
378   return osec;
379 }
380 
381 NamePair macho::maybeRenameSection(NamePair key) {
382   auto newNames = config->sectionRenameMap.find(key);
383   if (newNames != config->sectionRenameMap.end())
384     return newNames->second;
385   return key;
386 }
387