//===- OutputSections.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "OutputSections.h"
#include "Config.h"
#include "LinkerScript.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/Arrays.h"
#include "lld/Common/Memory.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Config/llvm-config.h" // LLVM_ENABLE_ZLIB
#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TimeProfiler.h"
#if LLVM_ENABLE_ZLIB
#include <zlib.h>
#endif

using namespace llvm;
using namespace llvm::dwarf;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

uint8_t *Out::bufferStart;
PhdrEntry *Out::tlsPhdr;
OutputSection *Out::elfHeader;
OutputSection *Out::programHeaders;
OutputSection *Out::preinitArray;
OutputSection *Out::initArray;
OutputSection *Out::finiArray;

SmallVector<OutputSection *, 0> elf::outputSections;

uint32_t OutputSection::getPhdrFlags() const {
  uint32_t ret = 0;
  // On ARM, SHF_ARM_PURECODE sections contain no data that needs to be read at
  // run time and may be placed in execute-only segments, so PF_R is dropped
  // for them.
  if (config->emachine != EM_ARM || !(flags & SHF_ARM_PURECODE))
    ret |= PF_R;
  if (flags & SHF_WRITE)
    ret |= PF_W;
  if (flags & SHF_EXECINSTR)
    ret |= PF_X;
  return ret;
}

template <class ELFT>
void OutputSection::writeHeaderTo(typename ELFT::Shdr *shdr) {
  shdr->sh_entsize = entsize;
  shdr->sh_addralign = alignment;
  shdr->sh_type = type;
  shdr->sh_offset = offset;
  shdr->sh_flags = flags;
  shdr->sh_info = info;
  shdr->sh_link = link;
  shdr->sh_addr = addr;
  shdr->sh_size = size;
  shdr->sh_name = shName;
}

OutputSection::OutputSection(StringRef name, uint32_t type, uint64_t flags)
    : SectionCommand(OutputSectionKind),
      SectionBase(Output, name, flags, /*Entsize*/ 0, /*Alignment*/ 1, type,
                  /*Info*/ 0, /*Link*/ 0) {}

// We allow sections of the types listed below to be merged into a
// single progbits section. This is typically done by linker
// scripts. Merging nobits and progbits will force disk space
// to be allocated for nobits sections. Other ones don't require
// any special treatment on top of progbits, so there doesn't
// seem to be any harm in merging them.
//
// NOTE: clang since rL252300 emits SHT_X86_64_UNWIND .eh_frame sections. Allow
// them to be merged into SHT_PROGBITS .eh_frame (GNU as .cfi_*).
static bool canMergeToProgbits(unsigned type) {
  return type == SHT_NOBITS || type == SHT_PROGBITS || type == SHT_INIT_ARRAY ||
         type == SHT_PREINIT_ARRAY || type == SHT_FINI_ARRAY ||
         type == SHT_NOTE ||
         (type == SHT_X86_64_UNWIND && config->emachine == EM_X86_64);
}
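
// For example, with a hand-written output section description such as
//
//   .data : { *(.data) *(.bss) }
//
// (an illustrative fragment, not anything lld generates itself),
// canMergeToProgbits() allows the SHT_PROGBITS and SHT_NOBITS inputs to land
// in one SHT_PROGBITS output section, at the cost of allocating file space
// for the former nobits contents.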

// Record that isec will be placed in the OutputSection. isec does not become
// permanent until finalizeInputSections() is called. The function should not
// be used after finalizeInputSections() is called. If you need to add an
// InputSection post finalizeInputSections(), then you must do the following:
//
// 1. Find or create an InputSectionDescription to hold InputSection.
// 2. Add the InputSection to the InputSectionDescription::sections.
// 3. Call commitSection(isec).
void OutputSection::recordSection(InputSectionBase *isec) {
  partition = isec->partition;
  isec->parent = this;
  if (commands.empty() || !isa<InputSectionDescription>(commands.back()))
    commands.push_back(make<InputSectionDescription>(""));
  auto *isd = cast<InputSectionDescription>(commands.back());
  isd->sectionBases.push_back(isec);
}

// Update fields (type, flags, alignment, etc) according to the InputSection
// isec. Also check whether the InputSection flags and type are consistent with
// other InputSections.
void OutputSection::commitSection(InputSection *isec) {
  if (!hasInputSections) {
    // If isec is the first section to be added to this section,
    // initialize type, entsize and flags from isec.
    hasInputSections = true;
    type = isec->type;
    entsize = isec->entsize;
    flags = isec->flags;
  } else {
    // Otherwise, check if new type or flags are compatible with existing ones.
    if ((flags ^ isec->flags) & SHF_TLS)
      error("incompatible section flags for " + name + "\n>>> " +
            toString(isec) + ": 0x" + utohexstr(isec->flags) +
            "\n>>> output section " + name + ": 0x" + utohexstr(flags));

    if (type != isec->type) {
      if (!canMergeToProgbits(type) || !canMergeToProgbits(isec->type))
        error("section type mismatch for " + isec->name + "\n>>> " +
              toString(isec) + ": " +
              getELFSectionTypeName(config->emachine, isec->type) +
              "\n>>> output section " + name + ": " +
              getELFSectionTypeName(config->emachine, type));
      type = SHT_PROGBITS;
    }
  }
  if (noload)
    type = SHT_NOBITS;

  isec->parent = this;
  // SHF_ARM_PURECODE is only kept if every input section carries it; all other
  // flags are OR'ed together.
  uint64_t andMask =
      config->emachine == EM_ARM ? (uint64_t)SHF_ARM_PURECODE : 0;
  uint64_t orMask = ~andMask;
  uint64_t andFlags = (flags & isec->flags) & andMask;
  uint64_t orFlags = (flags | isec->flags) & orMask;
  flags = andFlags | orFlags;
  if (nonAlloc)
    flags &= ~(uint64_t)SHF_ALLOC;

  alignment = std::max(alignment, isec->alignment);

  // If this section contains a table of fixed-size entries, sh_entsize
  // holds the element size. If it contains elements of different size we
  // set sh_entsize to 0.
  if (entsize != isec->entsize)
    entsize = 0;
}

static MergeSyntheticSection *createMergeSynthetic(StringRef name,
                                                   uint32_t type,
                                                   uint64_t flags,
                                                   uint32_t alignment) {
  // Tail merging (sharing of string suffixes) is relatively expensive, so it
  // is only enabled at -O2.
  if ((flags & SHF_STRINGS) && config->optimize >= 2)
    return make<MergeTailSection>(name, type, flags, alignment);
  return make<MergeNoTailSection>(name, type, flags, alignment);
}

// This function scans over the InputSectionBase list sectionBases to create
// InputSectionDescription::sections.
//
// It removes MergeInputSections from the input section array and adds
// new synthetic sections at the location of the first input section
// that it replaces. It then finalizes each synthetic section in order
// to compute an output offset for each piece of each input section.
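// MergeInputSections that share flags and entsize (and alignment, for
// SHF_STRINGS sections) are routed into a single MergeSyntheticSection; merge
// sections that are no longer live are dropped rather than merged.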
void OutputSection::finalizeInputSections() {
  std::vector<MergeSyntheticSection *> mergeSections;
  for (SectionCommand *cmd : commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    isd->sections.reserve(isd->sectionBases.size());
    for (InputSectionBase *s : isd->sectionBases) {
      MergeInputSection *ms = dyn_cast<MergeInputSection>(s);
      if (!ms) {
        isd->sections.push_back(cast<InputSection>(s));
        continue;
      }

      // We do not want to handle sections that are not alive, so just remove
      // them instead of trying to merge.
      if (!ms->isLive())
        continue;

      auto i = llvm::find_if(mergeSections, [=](MergeSyntheticSection *sec) {
        // While we could create a single synthetic section for two different
        // values of Entsize, it is better to take Entsize into consideration.
        //
        // With a single synthetic section no two pieces with different Entsize
        // could be equal, so we may as well have two sections.
        //
        // Using Entsize here also allows us to propagate it to the synthetic
        // section.
        //
        // SHF_STRINGS sections with different alignments should not be merged.
        return sec->flags == ms->flags && sec->entsize == ms->entsize &&
               (sec->alignment == ms->alignment || !(sec->flags & SHF_STRINGS));
      });
      if (i == mergeSections.end()) {
        MergeSyntheticSection *syn =
            createMergeSynthetic(name, ms->type, ms->flags, ms->alignment);
        mergeSections.push_back(syn);
        i = std::prev(mergeSections.end());
        syn->entsize = ms->entsize;
        isd->sections.push_back(syn);
      }
      (*i)->addSection(ms);
    }

    // sectionBases should not be used from this point onwards. Clear it to
    // catch misuses.
    isd->sectionBases.clear();

    // Some input sections may be removed from the list after ICF.
    for (InputSection *s : isd->sections)
      commitSection(s);
  }
  for (auto *ms : mergeSections)
    ms->finalizeContents();
}

static void sortByOrder(MutableArrayRef<InputSection *> in,
                        llvm::function_ref<int(InputSectionBase *s)> order) {
  std::vector<std::pair<int, InputSection *>> v;
  for (InputSection *s : in)
    v.push_back({order(s), s});
  llvm::stable_sort(v, less_first());

  for (size_t i = 0; i < v.size(); ++i)
    in[i] = v[i].second;
}

uint64_t elf::getHeaderSize() {
  if (config->oFormatBinary)
    return 0;
  return Out::elfHeader->size + Out::programHeaders->size;
}

bool OutputSection::classof(const SectionCommand *c) {
  return c->kind == OutputSectionKind;
}

void OutputSection::sort(llvm::function_ref<int(InputSectionBase *s)> order) {
  assert(isLive());
  for (SectionCommand *b : commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(b))
      sortByOrder(isd->sections, order);
}
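
// Fill [buf, buf + size) with no-op instructions for the current target.
// target->nopInstrs is expected to hold, at index i, a no-op encoding that is
// i + 1 bytes long; the bulk of the gap is written with the longest encoding
// and the remainder with a single no-op of the exact size.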
static void nopInstrFill(uint8_t *buf, size_t size) {
  if (size == 0)
    return;
  unsigned i = 0;
  std::vector<std::vector<uint8_t>> nopFiller = *target->nopInstrs;
  unsigned num = size / nopFiller.back().size();
  for (unsigned c = 0; c < num; ++c) {
    memcpy(buf + i, nopFiller.back().data(), nopFiller.back().size());
    i += nopFiller.back().size();
  }
  unsigned remaining = size - i;
  if (!remaining)
    return;
  assert(nopFiller[remaining - 1].size() == remaining);
  memcpy(buf + i, nopFiller[remaining - 1].data(), remaining);
}

// Fill [Buf, Buf + Size) with Filler.
// This is used for the linker script "=fillexp" command.
static void fill(uint8_t *buf, size_t size,
                 const std::array<uint8_t, 4> &filler) {
  size_t i = 0;
  for (; i + 4 < size; i += 4)
    memcpy(buf + i, filler.data(), 4);
  memcpy(buf + i, filler.data(), size - i);
}

#if LLVM_ENABLE_ZLIB
static SmallVector<uint8_t, 0> deflateShard(ArrayRef<uint8_t> in, int level,
                                            int flush) {
  // 15 and 8 are default. windowBits=-15 is negative to generate raw deflate
  // data with no zlib header or trailer.
  z_stream s = {};
  deflateInit2(&s, level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY);
  s.next_in = const_cast<uint8_t *>(in.data());
  s.avail_in = in.size();

  // Allocate a buffer of half of the input size, and grow it by 1.5x if
  // insufficient.
  SmallVector<uint8_t, 0> out;
  size_t pos = 0;
  out.resize_for_overwrite(std::max<size_t>(in.size() / 2, 64));
  do {
    if (pos == out.size())
      out.resize_for_overwrite(out.size() * 3 / 2);
    s.next_out = out.data() + pos;
    s.avail_out = out.size() - pos;
    (void)deflate(&s, flush);
    pos = s.next_out - out.data();
  } while (s.avail_out == 0);
  assert(s.avail_in == 0);

  out.truncate(pos);
  deflateEnd(&s);
  return out;
}
#endif

// Compress section contents if this section contains debug info.
template <class ELFT> void OutputSection::maybeCompress() {
#if LLVM_ENABLE_ZLIB
  using Elf_Chdr = typename ELFT::Chdr;

  // Compress only DWARF debug sections.
  if (!config->compressDebugSections || (flags & SHF_ALLOC) ||
      !name.startswith(".debug_") || size == 0)
    return;

  llvm::TimeTraceScope timeScope("Compress debug sections");

  // Write uncompressed data to a temporary zero-initialized buffer.
  auto buf = std::make_unique<uint8_t[]>(size);
  writeTo<ELFT>(buf.get());
  // We chose 1 (Z_BEST_SPEED) as the default compression level because it is
  // the fastest. If -O2 is given, we use level 6 to compress debug info more
  // by ~15%. We found that levels 7 to 9 don't make much difference (~1% more
  // compression) while they take a significant amount of time (~2x), so level
  // 6 seems enough.
  const int level = config->optimize >= 2 ? 6 : Z_BEST_SPEED;

  // Split input into 1-MiB shards.
  constexpr size_t shardSize = 1 << 20;
  auto shardsIn = split(makeArrayRef<uint8_t>(buf.get(), size), shardSize);
  const size_t numShards = shardsIn.size();

  // Compress shards and compute Adler-32 checksums. Use Z_SYNC_FLUSH for all
  // shards but the last to flush the output to a byte boundary to be
  // concatenated with the next shard.
  auto shardsOut = std::make_unique<SmallVector<uint8_t, 0>[]>(numShards);
  auto shardsAdler = std::make_unique<uint32_t[]>(numShards);
  parallelForEachN(0, numShards, [&](size_t i) {
    shardsOut[i] = deflateShard(shardsIn[i], level,
                                i != numShards - 1 ? Z_SYNC_FLUSH : Z_FINISH);
    shardsAdler[i] = adler32(1, shardsIn[i].data(), shardsIn[i].size());
  });
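
  // The compressed section assembled later in writeTo<ELFT>() consists of an
  // Elf_Chdr header, the 2-byte zlib header (0x78 0x01), the concatenated raw
  // deflate shards, and a 4-byte big-endian Adler-32 checksum of the
  // uncompressed data.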

  // Update section size and combine Adler-32 checksums.
  uint32_t checksum = 1;       // Initial Adler-32 value
  compressed.uncompressedSize = size;
  size = sizeof(Elf_Chdr) + 2; // Elf_Chdr and zlib header
  for (size_t i = 0; i != numShards; ++i) {
    size += shardsOut[i].size();
    checksum = adler32_combine(checksum, shardsAdler[i], shardsIn[i].size());
  }
  size += 4; // checksum

  compressed.shards = std::move(shardsOut);
  compressed.numShards = numShards;
  compressed.checksum = checksum;
  flags |= SHF_COMPRESSED;
#endif
}

static void writeInt(uint8_t *buf, uint64_t data, uint64_t size) {
  if (size == 1)
    *buf = data;
  else if (size == 2)
    write16(buf, data);
  else if (size == 4)
    write32(buf, data);
  else if (size == 8)
    write64(buf, data);
  else
    llvm_unreachable("unsupported Size argument");
}

template <class ELFT> void OutputSection::writeTo(uint8_t *buf) {
  llvm::TimeTraceScope timeScope("Write sections", name);
  if (type == SHT_NOBITS)
    return;

  // If --compress-debug-sections is specified and if this is a debug section,
  // we've already compressed section contents. If that's the case,
  // just write it down.
  if (compressed.shards) {
    auto *chdr = reinterpret_cast<typename ELFT::Chdr *>(buf);
    chdr->ch_type = ELFCOMPRESS_ZLIB;
    chdr->ch_size = compressed.uncompressedSize;
    chdr->ch_addralign = alignment;
    buf += sizeof(*chdr);

    // Compute shard offsets.
    auto offsets = std::make_unique<size_t[]>(compressed.numShards);
    offsets[0] = 2; // zlib header
    for (size_t i = 1; i != compressed.numShards; ++i)
      offsets[i] = offsets[i - 1] + compressed.shards[i - 1].size();

    buf[0] = 0x78; // CMF
    buf[1] = 0x01; // FLG: best speed
    parallelForEachN(0, compressed.numShards, [&](size_t i) {
      memcpy(buf + offsets[i], compressed.shards[i].data(),
             compressed.shards[i].size());
    });

    write32be(buf + (size - sizeof(*chdr) - 4), compressed.checksum);
    return;
  }

  // Write leading padding.
  SmallVector<InputSection *, 0> sections = getInputSections(*this);
  std::array<uint8_t, 4> filler = getFiller();
  bool nonZeroFiller = read32(filler.data()) != 0;
  if (nonZeroFiller)
    fill(buf, sections.empty() ? size : sections[0]->outSecOff, filler);

  parallelForEachN(0, sections.size(), [&](size_t i) {
    InputSection *isec = sections[i];
    isec->writeTo<ELFT>(buf + isec->outSecOff);

    // Fill gaps between sections.
    if (nonZeroFiller) {
      uint8_t *start = buf + isec->outSecOff + isec->getSize();
      uint8_t *end;
      if (i + 1 == sections.size())
        end = buf + size;
      else
        end = buf + sections[i + 1]->outSecOff;
      if (isec->nopFiller) {
        assert(target->nopInstrs);
        nopInstrFill(start, end - start);
      } else
        fill(start, end - start, filler);
    }
  });

  // Linker scripts may have BYTE()-family commands with which you
  // can write arbitrary bytes to the output. Process them if any.
  for (SectionCommand *cmd : commands)
    if (auto *data = dyn_cast<ByteCommand>(cmd))
      writeInt(buf + data->offset, data->expression().getValue(), data->size);
}
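
// Finalize a SHT_GROUP output section, which only appears in relocatable
// output (-r): sh_link points at the symbol table, sh_info at the group
// signature symbol, and the size is recomputed because group members may have
// been combined or discarded.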
static void finalizeShtGroup(OutputSection *os, InputSection *section) {
  assert(config->relocatable);

  // sh_link field for SHT_GROUP sections should contain the section index of
  // the symbol table.
  os->link = in.symTab->getParent()->sectionIndex;

  // sh_info then contains the index of an entry in the symbol table section
  // which provides the signature of the section group.
  ArrayRef<Symbol *> symbols = section->file->getSymbols();
  os->info = in.symTab->getSymbolIndex(symbols[section->info]);

  // Some group members may be combined or discarded, so we need to compute the
  // new size. The content will be rewritten in InputSection::copyShtGroup.
  DenseSet<uint32_t> seen;
  ArrayRef<InputSectionBase *> sections = section->file->getSections();
  for (const uint32_t &idx : section->getDataAs<uint32_t>().slice(1))
    if (OutputSection *osec = sections[read32(&idx)]->getOutputSection())
      seen.insert(osec->sectionIndex);
  os->size = (1 + seen.size()) * sizeof(uint32_t);
}

void OutputSection::finalize() {
  InputSection *first = getFirstInputSection(this);

  if (flags & SHF_LINK_ORDER) {
    // We must preserve the link order dependency of sections with the
    // SHF_LINK_ORDER flag. The dependency is indicated by the sh_link field.
    // We need to translate the InputSection sh_link to the OutputSection
    // sh_link; all InputSections in the OutputSection have the same dependency.
    if (auto *ex = dyn_cast<ARMExidxSyntheticSection>(first))
      link = ex->getLinkOrderDep()->getParent()->sectionIndex;
    else if (first->flags & SHF_LINK_ORDER)
      if (auto *d = first->getLinkOrderDep())
        link = d->getParent()->sectionIndex;
  }

  if (type == SHT_GROUP) {
    finalizeShtGroup(this, first);
    return;
  }

  if (!config->copyRelocs || (type != SHT_RELA && type != SHT_REL))
    return;

  // Skip if 'first' is synthetic, i.e. not a section created by --emit-relocs.
  // Normally 'type' was changed by 'first' so 'first' should be non-null.
  // However, if the output section is .rela.dyn, 'type' can be set by the
  // empty synthetic .rela.plt and first can be null.
  if (!first || isa<SyntheticSection>(first))
    return;

  link = in.symTab->getParent()->sectionIndex;
  // sh_info for SHT_REL[A] sections should contain the section header index of
  // the section to which the relocation applies.
  InputSectionBase *s = first->getRelocatedSection();
  info = s->getOutputSection()->sectionIndex;
  flags |= SHF_INFO_LINK;
}

// Returns true if S is in one of the many forms the compiler driver may pass
// crtbegin files.
//
// Gcc uses any of crtbegin[<empty>|S|T].o.
// Clang uses Gcc's plus clang_rt.crtbegin[-<arch>|<empty>].o.
static bool isCrt(StringRef s, StringRef beginEnd) {
  s = sys::path::filename(s);
  if (!s.consume_back(".o"))
    return false;
  if (s.consume_front("clang_rt."))
    return s.consume_front(beginEnd);
  return s.consume_front(beginEnd) && s.size() <= 1;
}
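
// For example, for beginEnd == "crtbegin", isCrt() accepts "crtbegin.o",
// "crtbeginS.o", "clang_rt.crtbegin.o" and "clang_rt.crtbegin-aarch64.o",
// but rejects "mycrtbegin.o".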

// .ctors and .dtors are sorted by this order:
//
// 1. .ctors/.dtors in crtbegin (which contains a sentinel value -1).
// 2. The section is named ".ctors" or ".dtors" (priority: 65536).
// 3. The section has an optional priority value in the form of ".ctors.N" or
//    ".dtors.N" where N is a number in the form of %05u (priority: 65535-N).
// 4. .ctors/.dtors in crtend (which contains a sentinel value 0).
//
// For 2 and 3, the sections are sorted by priority from high to low, e.g.
// .ctors (65536), .ctors.00100 (65436), .ctors.00200 (65336). In GNU ld's
// internal linker scripts, the sorting is by string comparison which can
// achieve the same goal given the optional priority values are of the same
// length.
//
// In an ideal world, we don't need this function because .init_array and
// .ctors are duplicate features (and .init_array is newer.) However, there
// are too many real-world use cases of .ctors, so we had no choice but to
// support them with these rather ad-hoc semantics.
static bool compCtors(const InputSection *a, const InputSection *b) {
  bool beginA = isCrt(a->file->getName(), "crtbegin");
  bool beginB = isCrt(b->file->getName(), "crtbegin");
  if (beginA != beginB)
    return beginA;
  bool endA = isCrt(a->file->getName(), "crtend");
  bool endB = isCrt(b->file->getName(), "crtend");
  if (endA != endB)
    return endB;
  return getPriority(a->name) > getPriority(b->name);
}

// Sorts input sections by the special rules for .ctors and .dtors.
// Unfortunately, the rules are different from the ones for .{init,fini}_array.
// Read the comment above.
void OutputSection::sortCtorsDtors() {
  assert(commands.size() == 1);
  auto *isd = cast<InputSectionDescription>(commands[0]);
  llvm::stable_sort(isd->sections, compCtors);
}

// If an input string is in the form of "foo.N" where N is a number, return N
// (65535-N if .ctors.N or .dtors.N). Otherwise, return 65536, which is one
// greater than the lowest priority.
int elf::getPriority(StringRef s) {
  size_t pos = s.rfind('.');
  if (pos == StringRef::npos)
    return 65536;
  int v = 65536;
  if (to_integer(s.substr(pos + 1), v, 10) &&
      (pos == 6 && (s.startswith(".ctors") || s.startswith(".dtors"))))
    v = 65535 - v;
  return v;
}
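
// For example, getPriority(".init_array.00010") is 10,
// getPriority(".ctors.00010") is 65525, and getPriority(".init_array"),
// which has no numeric suffix, is 65536.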

InputSection *elf::getFirstInputSection(const OutputSection *os) {
  for (SectionCommand *cmd : os->commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
      if (!isd->sections.empty())
        return isd->sections[0];
  return nullptr;
}

SmallVector<InputSection *, 0> elf::getInputSections(const OutputSection &os) {
  SmallVector<InputSection *, 0> ret;
  for (SectionCommand *cmd : os.commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
      ret.insert(ret.end(), isd->sections.begin(), isd->sections.end());
  return ret;
}

// Sorts input sections by section name suffixes, so that .foo.N comes
// before .foo.M if N < M. Used to sort .{init,fini}_array.N sections.
// We want to keep the original order if the priorities are the same
// because the compiler keeps the original initialization order in a
// translation unit and we need to respect that.
// For more detail, read the section of the GCC's manual about init_priority.
void OutputSection::sortInitFini() {
  // Sort sections by priority.
  sort([](InputSectionBase *s) { return getPriority(s->name); });
}

std::array<uint8_t, 4> OutputSection::getFiller() {
  if (filler)
    return *filler;
  if (flags & SHF_EXECINSTR)
    return target->trapInstr;
  return {0, 0, 0, 0};
}

void OutputSection::checkDynRelAddends(const uint8_t *bufStart) {
  assert(config->writeAddends && config->checkDynamicRelocs);
  assert(type == SHT_REL || type == SHT_RELA);
  SmallVector<InputSection *, 0> sections = getInputSections(*this);
  parallelForEachN(0, sections.size(), [&](size_t i) {
    // When linking with -r or --emit-relocs we might also call this function
    // for input .rel[a].<sec> sections which we simply pass through to the
    // output. We skip over those and only look at the synthetic relocation
    // sections created during linking.
    const auto *sec = dyn_cast<RelocationBaseSection>(sections[i]);
    if (!sec)
      return;
    for (const DynamicReloc &rel : sec->relocs) {
      int64_t addend = rel.addend;
      const OutputSection *relOsec = rel.inputSec->getOutputSection();
      assert(relOsec != nullptr && "missing output section for relocation");
      const uint8_t *relocTarget =
          bufStart + relOsec->offset + rel.inputSec->getOffset(rel.offsetInSec);
      // For SHT_NOBITS the written addend is always zero.
      int64_t writtenAddend =
          relOsec->type == SHT_NOBITS
              ? 0
              : target->getImplicitAddend(relocTarget, rel.type);
      if (addend != writtenAddend)
        internalLinkerError(
            getErrorLocation(relocTarget),
            "wrote incorrect addend value 0x" + utohexstr(writtenAddend) +
                " instead of 0x" + utohexstr(addend) +
                " for dynamic relocation " + toString(rel.type) +
                " at offset 0x" + utohexstr(rel.getOffset()) +
                (rel.sym ? " against symbol " + toString(*rel.sym) : ""));
    }
  });
}

template void OutputSection::writeHeaderTo<ELF32LE>(ELF32LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF32BE>(ELF32BE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64LE>(ELF64LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64BE>(ELF64BE::Shdr *Shdr);

template void OutputSection::writeTo<ELF32LE>(uint8_t *Buf);
template void OutputSection::writeTo<ELF32BE>(uint8_t *Buf);
template void OutputSection::writeTo<ELF64LE>(uint8_t *Buf);
template void OutputSection::writeTo<ELF64BE>(uint8_t *Buf);

template void OutputSection::maybeCompress<ELF32LE>();
template void OutputSection::maybeCompress<ELF32BE>();
template void OutputSection::maybeCompress<ELF64LE>();
template void OutputSection::maybeCompress<ELF64BE>();