//===- ConcatOutputSection.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ConcatOutputSection.h"
#include "Config.h"
#include "OutputSegment.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Memory.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/ScopedPrinter.h"

#include <algorithm>

using namespace llvm;
using namespace llvm::MachO;
using namespace lld;
using namespace lld::macho;

void ConcatOutputSection::addInput(ConcatInputSection *input) {
  if (inputs.empty()) {
    align = input->align;
    flags = input->flags;
  } else {
    align = std::max(align, input->align);
    mergeFlags(input);
  }
  inputs.push_back(input);
  input->parent = this;
}

// Branch-range extension can be implemented in two ways, either through ...
//
// (1) Branch islands: Single branch instructions (also of limited range),
//     that might be chained in multiple hops to reach the desired
//     destination. On ARM64, as many as 16 branch islands are needed to hop
//     between opposite ends of a 2 GiB program. LD64 uses branch islands
//     exclusively, even when it needs excessive hops.
//
// (2) Thunks: Instruction(s) to load the destination address into a scratch
//     register, followed by a register-indirect branch. Thunks are
//     constructed to reach any arbitrary address, so they need not be
//     chained. However, a program might need multiple thunks to the same
//     destination, distributed throughout a large program so that all call
//     sites have one within range.
//
// The optimal approach is to mix islands for destinations within two hops,
// and use thunks for destinations at greater distance. For now, we only
// implement thunks. TODO: Add support for branch islands.
//
// Internally -- as expressed in LLD's data structures -- a
// branch-range-extension thunk comprises ...
//
// (1) new Defined privateExtern symbol for the thunk named
//     <FUNCTION>.thunk.<SEQUENCE>, which references ...
// (2) new InputSection, which contains ...
// (3.1) new data for the instructions to load & branch to the far address +
// (3.2) new Relocs on instructions to load the far address, which reference ...
// (4.1) existing Defined extern symbol for the real function in __text, or
// (4.2) existing DylibSymbol for the real function in a dylib
//
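// As a concrete -- purely illustrative -- example of the pieces above: on
// arm64, where target->populateThunk() emits a three-instruction stub, the
// first thunk for a hypothetical function _foo would be named _foo.thunk.0
// and would look roughly like
//
//   adrp x16, _foo@page
//   add  x16, x16, _foo@pageoff
//   br   x16
//
// with Relocs on the adrp/add pair referencing _foo itself.
//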
// Nearly-optimal thunk-placement algorithm features:
//
// * Single pass: O(n) on the number of call sites.
//
// * Accounts for the exact space overhead of thunks - no heuristics
//
// * Exploits the full range of call instructions - forward & backward
//
// Data:
//
// * DenseMap<Symbol *, ThunkInfo> thunkMap: Maps the function symbol
//   to its thunk bookkeeper.
//
// * struct ThunkInfo (bookkeeper): Call instructions have limited range, and
//   distant call sites might be unable to reach the same thunk, so multiple
//   thunks are necessary to serve all call sites in a very large program. A
//   thunkInfo stores state for all thunks associated with a particular
//   function: (a) thunk symbol, (b) input section containing stub code, and
//   (c) sequence number for the active thunk incarnation. When an old thunk
//   goes out of range, we increment the sequence number and create a new
//   thunk named <FUNCTION>.thunk.<SEQUENCE>.
//
// * A thunk incarnation comprises (a) a private-extern Defined symbol pointing
//   to (b) an InputSection holding machine instructions (similar to a MachO
//   stub), and (c) Reloc(s) that reference the real function for fixing up
//   the stub code.
//
// * std::vector<InputSection *> ConcatOutputSection::thunks: A vector parallel
//   to the inputs vector. We store new thunks via cheap vector append, rather
//   than costly insertion into the inputs vector.
//
// Control Flow:
//
// * During address assignment, ConcatOutputSection::finalize() examines call
//   sites by ascending address and creates thunks. When a function is beyond
//   the range of a call site, we need a thunk. Place it at the largest
//   available forward address from the call site. Call sites increase
//   monotonically and thunks are always placed as far forward as possible;
//   thus, we place thunks at monotonically increasing addresses. Once a thunk
//   is placed, it and all previous input-section addresses are final.
//
// * ConcatOutputSection::finalize() and ConcatOutputSection::writeTo() merge
//   the inputs and thunks vectors (both ordered by ascending address), which
//   is simple and cheap.

DenseMap<Symbol *, ThunkInfo> lld::macho::thunkMap;

// Determine whether we need thunks, which depends on the target arch -- RISC
// (i.e., ARM) generally does because it has limited-range branch/call
// instructions, whereas CISC (i.e., x86) generally doesn't. RISC only needs
// thunks for programs so large that branch source & destination addresses
// might differ by more than the range of a branch instruction.
bool ConcatOutputSection::needsThunks() const {
  if (!target->usesThunks())
    return false;
  uint64_t isecAddr = addr;
  for (InputSection *isec : inputs)
    isecAddr = alignTo(isecAddr, isec->align) + isec->getSize();
  if (isecAddr - addr + in.stubs->getSize() <= target->branchRange)
    return false;
  // Yes, this program is large enough to need thunks.
  for (InputSection *isec : inputs) {
    for (Reloc &r : isec->relocs) {
      if (!target->hasAttr(r.type, RelocAttrBits::BRANCH))
        continue;
      auto *sym = r.referent.get<Symbol *>();
      // Pre-populate the thunkMap and memoize call site counts for every
      // InputSection and ThunkInfo. We do this for the benefit of
      // ConcatOutputSection::estimateStubsInRangeVA().
      ThunkInfo &thunkInfo = thunkMap[sym];
      // Knowing the ThunkInfo call site count will help us know whether or not
      // we might need to create more thunks for this referent at the time we
      // are estimating the distance to __stubs.
      ++thunkInfo.callSiteCount;
      // Knowing the InputSection call site count will help us avoid work on
      // those that have no BRANCH relocs.
      ++isec->callSiteCount;
    }
  }
  return true;
}

// Since __stubs is placed after __text, we must estimate the address
// beyond which stubs are within range of a simple forward branch.
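// As a rough worked example (all numbers hypothetical, not taken from a real
// link): with arm64's 128 MiB (0x800'0000) branch range and 12-byte thunks,
// if the remaining input sections are estimated to end at
// isecEnd = 0x1'1000'0000, at most 1,000 more thunks might be placed, and
// __stubs occupies 0x4000 bytes, then the threshold is
//   0x1'1000'0000 + 1,000 * 12 + 0x4000 - 0x800'0000 = 0x1'0800'6ee0,
// and call sites at or above that address can branch to __stubs directly.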
uint64_t ConcatOutputSection::estimateStubsInRangeVA(size_t callIdx) const {
  uint64_t branchRange = target->branchRange;
  size_t endIdx = inputs.size();
  ConcatInputSection *isec = inputs[callIdx];
  uint64_t isecVA = isec->getVA();
  // Tally the non-stub functions which still have call sites
  // remaining to process, which yields the maximum number
  // of thunks we might yet place.
  size_t maxPotentialThunks = 0;
  for (auto &tp : thunkMap) {
    ThunkInfo &ti = tp.second;
    maxPotentialThunks +=
        !tp.first->isInStubs() && ti.callSitesUsed < ti.callSiteCount;
  }
  // Tally the total size of input sections remaining to process.
  uint64_t isecEnd = isec->getVA();
  for (size_t i = callIdx; i < endIdx; i++) {
    InputSection *isec = inputs[i];
    isecEnd = alignTo(isecEnd, isec->align) + isec->getSize();
  }
  // Estimate the address after which call sites can safely call stubs
  // directly rather than through intermediary thunks.
  uint64_t stubsInRangeVA = isecEnd + maxPotentialThunks * target->thunkSize +
                            in.stubs->getSize() - branchRange;
  log("thunks = " + std::to_string(thunkMap.size()) +
      ", potential = " + std::to_string(maxPotentialThunks) +
      ", stubs = " + std::to_string(in.stubs->getSize()) + ", isecVA = " +
      to_hexString(isecVA) + ", threshold = " + to_hexString(stubsInRangeVA) +
      ", isecEnd = " + to_hexString(isecEnd) +
      ", tail = " + to_hexString(isecEnd - isecVA) +
      ", slop = " + to_hexString(branchRange - (isecEnd - isecVA)));
  return stubsInRangeVA;
}

void ConcatOutputSection::finalize() {
  uint64_t isecAddr = addr;
  uint64_t isecFileOff = fileOff;
  auto finalizeOne = [&](ConcatInputSection *isec) {
    isecAddr = alignTo(isecAddr, isec->align);
    isecFileOff = alignTo(isecFileOff, isec->align);
    isec->outSecOff = isecAddr - addr;
    isec->outSecFileOff = isecFileOff - fileOff;
    isec->isFinal = true;
    isecAddr += isec->getSize();
    isecFileOff += isec->getFileSize();
  };

  if (!needsThunks()) {
    for (ConcatInputSection *isec : inputs)
      finalizeOne(isec);
    size = isecAddr - addr;
    fileSize = isecFileOff - fileOff;
    return;
  }

  uint64_t branchRange = target->branchRange;
  uint64_t stubsInRangeVA = TargetInfo::outOfRangeVA;
  size_t thunkSize = target->thunkSize;
  size_t relocCount = 0;
  size_t callSiteCount = 0;
  size_t thunkCallCount = 0;
  size_t thunkCount = 0;

  // inputs[finalIdx] is for finalization (address assignment)
  size_t finalIdx = 0;
  // Kick off by ensuring that the first input section has an address
  for (size_t callIdx = 0, endIdx = inputs.size(); callIdx < endIdx;
       ++callIdx) {
    if (finalIdx == callIdx)
      finalizeOne(inputs[finalIdx++]);
    ConcatInputSection *isec = inputs[callIdx];
    assert(isec->isFinal);
    uint64_t isecVA = isec->getVA();
    // Assign addresses up to the forward branch-range limit
    while (finalIdx < endIdx &&
           isecAddr + inputs[finalIdx]->getSize() < isecVA + branchRange)
      finalizeOne(inputs[finalIdx++]);
    if (isec->callSiteCount == 0)
      continue;
    if (finalIdx == endIdx && stubsInRangeVA == TargetInfo::outOfRangeVA) {
      // When we have finalized all input sections, __stubs (destined
      // to follow __text) comes within range of forward branches and
      // we can estimate the threshold address after which we can
      // reach any stub with a forward branch.
      // Note that although it sits in the middle of a loop, this code
      // executes only once. It is in the loop because we need to call it at
      // the proper time: the earliest call site from which the end of __text
      // (and start of __stubs) comes within range of a forward branch.
      stubsInRangeVA = estimateStubsInRangeVA(callIdx);
    }
    // Process relocs by ascending address, i.e., ascending offset within isec
    std::vector<Reloc> &relocs = isec->relocs;
    assert(is_sorted(relocs,
                     [](Reloc &a, Reloc &b) { return a.offset > b.offset; }));
    for (Reloc &r : reverse(relocs)) {
      ++relocCount;
      if (!target->hasAttr(r.type, RelocAttrBits::BRANCH))
        continue;
      ++callSiteCount;
      // Calculate branch reachability boundaries
      uint64_t callVA = isecVA + r.offset;
      uint64_t lowVA = branchRange < callVA ? callVA - branchRange : 0;
      uint64_t highVA = callVA + branchRange;
      // Calculate our call referent address
      auto *funcSym = r.referent.get<Symbol *>();
      ThunkInfo &thunkInfo = thunkMap[funcSym];
      // The referent is not reachable, so we need to use a thunk ...
      if (funcSym->isInStubs() && callVA >= stubsInRangeVA) {
        // ... Oh, wait! We are close enough to the end that __stubs
        // are now within range of a simple forward branch.
        continue;
      }
      uint64_t funcVA = funcSym->resolveBranchVA();
      ++thunkInfo.callSitesUsed;
      if (lowVA < funcVA && funcVA < highVA) {
        // The referent is reachable with a simple call instruction.
        continue;
      }
      ++thunkInfo.thunkCallCount;
      ++thunkCallCount;
      // If an existing thunk is reachable, use it ...
      if (thunkInfo.sym) {
        uint64_t thunkVA = thunkInfo.isec->getVA();
        if (lowVA < thunkVA && thunkVA < highVA) {
          r.referent = thunkInfo.sym;
          continue;
        }
      }
      // ... otherwise, create a new thunk
      if (isecAddr > highVA) {
        // When there is small-to-no margin between highVA and
        // isecAddr and the distance between subsequent call sites is
        // smaller than thunkSize, then a new thunk can go out of
        // range. Fix by unfinalizing inputs[finalIdx] to reduce the
        // distance between callVA and highVA, then shift some thunks
        // to occupy address-space formerly occupied by the
        // unfinalized inputs[finalIdx].
        fatal(Twine(__FUNCTION__) + ": FIXME: thunk range overrun");
      }
      thunkInfo.isec = make<ConcatInputSection>();
      thunkInfo.isec->name = isec->name;
      thunkInfo.isec->segname = isec->segname;
      thunkInfo.isec->parent = this;
      StringRef thunkName = saver.save(funcSym->getName() + ".thunk." +
                                       std::to_string(thunkInfo.sequence++));
      r.referent = thunkInfo.sym = symtab->addDefined(
          thunkName, /*file=*/nullptr, thunkInfo.isec, /*value=*/0,
          /*size=*/thunkSize, /*isWeakDef=*/false, /*isPrivateExtern=*/true,
          /*isThumb=*/false, /*isReferencedDynamically=*/false,
          /*noDeadStrip=*/false);
      target->populateThunk(thunkInfo.isec, funcSym);
      finalizeOne(thunkInfo.isec);
      thunks.push_back(thunkInfo.isec);
      ++thunkCount;
    }
  }
  size = isecAddr - addr;
  fileSize = isecFileOff - fileOff;

  log("thunks for " + parent->name + "," + name +
      ": funcs = " + std::to_string(thunkMap.size()) +
      ", relocs = " + std::to_string(relocCount) +
      ", all calls = " + std::to_string(callSiteCount) +
      ", thunk calls = " + std::to_string(thunkCallCount) +
      ", thunks = " + std::to_string(thunkCount));
}

void ConcatOutputSection::writeTo(uint8_t *buf) const {
  // Merge input sections from thunk & ordinary vectors
  size_t i = 0, ie = inputs.size();
  size_t t = 0, te = thunks.size();
  while (i < ie || t < te) {
    while (i < ie && (t == te || inputs[i]->getSize() == 0 ||
                      inputs[i]->outSecOff < thunks[t]->outSecOff)) {
      inputs[i]->writeTo(buf + inputs[i]->outSecFileOff);
      ++i;
    }
    while (t < te && (i == ie || thunks[t]->outSecOff < inputs[i]->outSecOff)) {
      thunks[t]->writeTo(buf + thunks[t]->outSecFileOff);
      ++t;
    }
  }
}

// TODO: this is most likely wrong; reconsider how section flags
// are actually merged. The logic presented here was written without
// any form of informed research.
void ConcatOutputSection::mergeFlags(InputSection *input) {
  uint8_t baseType = sectionType(flags);
  uint8_t inputType = sectionType(input->flags);
  if (baseType != inputType)
    error("Cannot merge section " + input->name + " (type=0x" +
          to_hexString(inputType) + ") into " + name + " (type=0x" +
          to_hexString(baseType) + "): inconsistent types");

  constexpr uint32_t strictFlags = S_ATTR_DEBUG | S_ATTR_STRIP_STATIC_SYMS |
                                   S_ATTR_NO_DEAD_STRIP | S_ATTR_LIVE_SUPPORT;
  if ((input->flags ^ flags) & strictFlags)
    error("Cannot merge section " + input->name + " (flags=0x" +
          to_hexString(input->flags) + ") into " + name + " (flags=0x" +
          to_hexString(flags) + "): strict flags differ");

  // Negate pure instruction presence if any section isn't pure.
  uint32_t pureMask = ~S_ATTR_PURE_INSTRUCTIONS | (input->flags & flags);

  // Merge the rest
  flags |= input->flags;
  flags &= pureMask;
}