1 //===- InputChunks.cpp ----------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "InputChunks.h"
10 #include "Config.h"
11 #include "OutputSegment.h"
12 #include "WriterUtils.h"
13 #include "lld/Common/ErrorHandler.h"
14 #include "lld/Common/LLVM.h"
15 #include "llvm/Support/LEB128.h"
16 #include "llvm/Support/xxhash.h"
17 
18 #define DEBUG_TYPE "lld"
19 
20 using namespace llvm;
21 using namespace llvm::wasm;
22 using namespace llvm::support::endian;
23 
24 namespace lld {
// Returns the printable name (e.g. "R_WASM_TYPE_INDEX_LEB") of a wasm
// relocation type.  The case list is generated from the canonical table in
// WasmRelocs.def so it stays in sync with LLVM's BinaryFormat definitions.
StringRef relocTypeToString(uint8_t relocType) {
  switch (relocType) {
// Expands to `case REL: return "NAME";` for every known relocation type.
#define WASM_RELOC(NAME, REL)                                                  \
  case REL:                                                                    \
    return #NAME;
#include "llvm/BinaryFormat/WasmRelocs.def"
#undef WASM_RELOC
  }
  llvm_unreachable("unknown reloc type");
}
35 
36 bool relocIs64(uint8_t relocType) {
37   switch (relocType) {
38   case R_WASM_MEMORY_ADDR_LEB64:
39   case R_WASM_MEMORY_ADDR_SLEB64:
40   case R_WASM_MEMORY_ADDR_REL_SLEB64:
41   case R_WASM_MEMORY_ADDR_I64:
42   case R_WASM_TABLE_INDEX_SLEB64:
43   case R_WASM_TABLE_INDEX_I64:
44   case R_WASM_FUNCTION_OFFSET_I64:
45   case R_WASM_TABLE_INDEX_REL_SLEB64:
46     return true;
47   default:
48     return false;
49   }
50 }
51 
52 std::string toString(const wasm::InputChunk *c) {
53   return (toString(c->file) + ":(" + c->getName() + ")").str();
54 }
55 
56 namespace wasm {
57 StringRef InputChunk::getComdatName() const {
58   uint32_t index = getComdat();
59   if (index == UINT32_MAX)
60     return StringRef();
61   return file->getWasmObj()->linkingData().Comdats[index];
62 }
63 
64 uint32_t InputChunk::getSize() const {
65   if (const auto *ms = dyn_cast<SyntheticMergedChunk>(this))
66     return ms->builder.getSize();
67 
68   if (const auto *f = dyn_cast<InputFunction>(this)) {
69     if (config->compressRelocations && f->file) {
70       return f->getCompressedSize();
71     }
72   }
73 
74   return data().size();
75 }
76 
77 uint32_t InputChunk::getInputSize() const {
78   if (const auto *f = dyn_cast<InputFunction>(this))
79     return f->function->Size;
80   return getSize();
81 }
82 
83 // Copy this input chunk to an mmap'ed output file and apply relocations.
84 void InputChunk::writeTo(uint8_t *buf) const {
85   if (const auto *f = dyn_cast<InputFunction>(this)) {
86     if (file && config->compressRelocations)
87       return f->writeCompressed(buf);
88   } else if (const auto *ms = dyn_cast<SyntheticMergedChunk>(this)) {
89     ms->builder.write(buf + outSecOff);
90     // Apply relocations
91     ms->relocate(buf + outSecOff);
92     return;
93   }
94 
95   // Copy contents
96   memcpy(buf + outSecOff, data().data(), data().size());
97 
98   // Apply relocations
99   relocate(buf + outSecOff);
100 }
101 
// Apply all of this chunk's relocations to an in-memory copy of its bytes.
// `buf` points at the start of the output section that already contains the
// chunk's data (see writeTo); each relocation is patched in place at the
// width its encoding occupies in the input.
void InputChunk::relocate(uint8_t *buf) const {
  if (relocations.empty())
    return;

  LLVM_DEBUG(dbgs() << "applying relocations: " << toString(this)
                    << " count=" << relocations.size() << "\n");
  // Relocation offsets are relative to the start of the input section, so
  // rebase them to the start of this chunk's bytes within `buf`.
  int32_t inputSectionOffset = getInputSectionOffset();
  uint64_t tombstone = getTombstone();

  for (const WasmRelocation &rel : relocations) {
    uint8_t *loc = buf + rel.Offset - inputSectionOffset;
    LLVM_DEBUG(dbgs() << "apply reloc: type=" << relocTypeToString(rel.Type));
    // TYPE_INDEX relocations reference a type, not a symbol, so there is no
    // symbol name to print for them.
    if (rel.Type != R_WASM_TYPE_INDEX_LEB)
      LLVM_DEBUG(dbgs() << " sym=" << file->getSymbols()[rel.Index]->getName());
    LLVM_DEBUG(dbgs() << " addend=" << rel.Addend << " index=" << rel.Index
                      << " offset=" << rel.Offset << "\n");
    auto value = file->calcNewValue(rel, tombstone, this);

    // LEB/SLEB fields in relocatable input are padded to a fixed width
    // (5 bytes for 32-bit values, 10 for 64-bit) so they can be overwritten
    // without shifting surrounding bytes; I32/I64 fields are plain
    // little-endian words.
    switch (rel.Type) {
    case R_WASM_TYPE_INDEX_LEB:
    case R_WASM_FUNCTION_INDEX_LEB:
    case R_WASM_GLOBAL_INDEX_LEB:
    case R_WASM_TAG_INDEX_LEB:
    case R_WASM_MEMORY_ADDR_LEB:
    case R_WASM_TABLE_NUMBER_LEB:
      encodeULEB128(value, loc, 5);
      break;
    case R_WASM_MEMORY_ADDR_LEB64:
      encodeULEB128(value, loc, 10);
      break;
    case R_WASM_TABLE_INDEX_SLEB:
    case R_WASM_TABLE_INDEX_REL_SLEB:
    case R_WASM_MEMORY_ADDR_SLEB:
    case R_WASM_MEMORY_ADDR_REL_SLEB:
    case R_WASM_MEMORY_ADDR_TLS_SLEB:
      // Truncate to 32 bits before sign-extended SLEB encoding.
      encodeSLEB128(static_cast<int32_t>(value), loc, 5);
      break;
    case R_WASM_TABLE_INDEX_SLEB64:
    case R_WASM_TABLE_INDEX_REL_SLEB64:
    case R_WASM_MEMORY_ADDR_SLEB64:
    case R_WASM_MEMORY_ADDR_REL_SLEB64:
      encodeSLEB128(static_cast<int64_t>(value), loc, 10);
      break;
    case R_WASM_TABLE_INDEX_I32:
    case R_WASM_MEMORY_ADDR_I32:
    case R_WASM_FUNCTION_OFFSET_I32:
    case R_WASM_SECTION_OFFSET_I32:
    case R_WASM_GLOBAL_INDEX_I32:
    case R_WASM_MEMORY_ADDR_LOCREL_I32:
      write32le(loc, value);
      break;
    case R_WASM_TABLE_INDEX_I64:
    case R_WASM_MEMORY_ADDR_I64:
    case R_WASM_FUNCTION_OFFSET_I64:
      write64le(loc, value);
      break;
    default:
      llvm_unreachable("unknown relocation type");
    }
  }
}
163 
// Copy relocation entries to a given output stream.
// This function is used only when a user passes "-r". For a regular link,
// we consume relocations instead of copying them to an output file.
void InputChunk::writeRelocations(raw_ostream &os) const {
  if (relocations.empty())
    return;

  // Delta that rebases input-section-relative offsets onto this chunk's
  // position in the output section.
  int32_t off = outSecOff - getInputSectionOffset();
  LLVM_DEBUG(dbgs() << "writeRelocations: " << file->getName()
                    << " offset=" << Twine(off) << "\n");

  // Each entry is serialized as: type, offset, index[, addend] — matching
  // the wasm object-file reloc section layout.
  for (const WasmRelocation &rel : relocations) {
    writeUleb128(os, rel.Type, "reloc type");
    writeUleb128(os, rel.Offset + off, "reloc offset");
    writeUleb128(os, file->calcNewIndex(rel), "reloc index");

    if (relocTypeHasAddend(rel.Type))
      writeSleb128(os, file->calcNewAddend(rel), "reloc addend");
  }
}
184 
185 uint64_t InputChunk::getTombstone() const {
186   if (const auto *s = dyn_cast<InputSection>(this)) {
187     return s->tombstoneValue;
188   }
189 
190   return 0;
191 }
192 
193 void InputFunction::setFunctionIndex(uint32_t index) {
194   LLVM_DEBUG(dbgs() << "InputFunction::setFunctionIndex: " << getName()
195                     << " -> " << index << "\n");
196   assert(!hasFunctionIndex());
197   functionIndex = index;
198 }
199 
200 void InputFunction::setTableIndex(uint32_t index) {
201   LLVM_DEBUG(dbgs() << "InputFunction::setTableIndex: " << getName() << " -> "
202                     << index << "\n");
203   assert(!hasTableIndex());
204   tableIndex = index;
205 }
206 
// Write a relocation value without padding and return the number of bytes
// written.  Only LEB/SLEB-encoded relocation types can be compressed this
// way; fixed-width (I32/I64) types never reach this function.
static unsigned writeCompressedReloc(uint8_t *buf, const WasmRelocation &rel,
                                     uint64_t value) {
  switch (rel.Type) {
  case R_WASM_TYPE_INDEX_LEB:
  case R_WASM_FUNCTION_INDEX_LEB:
  case R_WASM_GLOBAL_INDEX_LEB:
  case R_WASM_TAG_INDEX_LEB:
  case R_WASM_MEMORY_ADDR_LEB:
  case R_WASM_MEMORY_ADDR_LEB64:
  case R_WASM_TABLE_NUMBER_LEB:
    // Minimal-length unsigned LEB128 (no padding argument).
    return encodeULEB128(value, buf);
  case R_WASM_TABLE_INDEX_SLEB:
  case R_WASM_TABLE_INDEX_SLEB64:
  case R_WASM_MEMORY_ADDR_SLEB:
  case R_WASM_MEMORY_ADDR_SLEB64:
    // Minimal-length signed LEB128.
    return encodeSLEB128(static_cast<int64_t>(value), buf);
  default:
    llvm_unreachable("unexpected relocation type");
  }
}
229 
// Returns the fixed, padded width (in bytes) that this relocation's field
// occupies in the relocatable input: 5 bytes for 32-bit LEB/SLEB fields,
// 10 bytes for 64-bit ones.  This is the amount of input to skip when
// re-emitting the function in compressed form.
static unsigned getRelocWidthPadded(const WasmRelocation &rel) {
  switch (rel.Type) {
  case R_WASM_TYPE_INDEX_LEB:
  case R_WASM_FUNCTION_INDEX_LEB:
  case R_WASM_GLOBAL_INDEX_LEB:
  case R_WASM_TAG_INDEX_LEB:
  case R_WASM_MEMORY_ADDR_LEB:
  case R_WASM_TABLE_NUMBER_LEB:
  case R_WASM_TABLE_INDEX_SLEB:
  case R_WASM_MEMORY_ADDR_SLEB:
    return 5;
  case R_WASM_TABLE_INDEX_SLEB64:
  case R_WASM_MEMORY_ADDR_LEB64:
  case R_WASM_MEMORY_ADDR_SLEB64:
    return 10;
  default:
    llvm_unreachable("unexpected relocation type");
  }
}
249 
250 static unsigned getRelocWidth(const WasmRelocation &rel, uint64_t value) {
251   uint8_t buf[10];
252   return writeCompressedReloc(buf, rel, value);
253 }
254 
// Relocations of type LEB and SLEB in the code section are padded to 5 bytes
// so that a fast linker can blindly overwrite them without needing to worry
// about the number of bytes needed to encode the values.
// However, for optimal output the code section can be compressed to remove
// the padding then outputting non-relocatable files.
// In this case we need to perform a size calculation based on the value at each
// relocation.  At best we end up saving 4 bytes for each relocation entry.
//
// This function only computes the final output size.  It must be called
// before getSize() is used to calculate the layout of the code section.
void InputFunction::calculateSize() {
  // Compression only applies to functions read from object files, and only
  // when the user asked for it.
  if (!file || !config->compressRelocations)
    return;

  LLVM_DEBUG(dbgs() << "calculateSize: " << getName() << "\n");

  // A wasm function body starts with a ULEB-encoded byte length.
  // decodeULEB128's second argument receives the number of bytes that
  // length field occupies; we only need the field's width here.
  const uint8_t *secStart = file->codeSection->Content.data();
  const uint8_t *funcStart = secStart + getInputSectionOffset();
  uint32_t functionSizeLength;
  decodeULEB128(funcStart, &functionSizeLength);

  uint32_t start = getInputSectionOffset();
  uint32_t end = start + function->Size;

  uint64_t tombstone = getTombstone();

  // Walk the relocations in order, summing the unrelocated bytes between
  // them plus the compressed (unpadded) width of each relocated value.
  uint32_t lastRelocEnd = start + functionSizeLength;
  for (const WasmRelocation &rel : relocations) {
    LLVM_DEBUG(dbgs() << "  region: " << (rel.Offset - lastRelocEnd) << "\n");
    compressedFuncSize += rel.Offset - lastRelocEnd;
    compressedFuncSize +=
        getRelocWidth(rel, file->calcNewValue(rel, tombstone, this));
    lastRelocEnd = rel.Offset + getRelocWidthPadded(rel);
  }
  LLVM_DEBUG(dbgs() << "  final region: " << (end - lastRelocEnd) << "\n");
  compressedFuncSize += end - lastRelocEnd;

  // Now we know how long the resulting function is we can add the encoding
  // of its length
  uint8_t buf[5];
  compressedSize = compressedFuncSize + encodeULEB128(compressedFuncSize, buf);

  LLVM_DEBUG(dbgs() << "  calculateSize orig: " << function->Size << "\n");
  LLVM_DEBUG(dbgs() << "  calculateSize  new: " << compressedSize << "\n");
}
300 
// Override the default writeTo method so that we can (optionally) write the
// compressed version of the function.  Layout must match what
// calculateSize() computed: size field, then alternating runs of verbatim
// bytes and re-encoded (unpadded) relocation values.
void InputFunction::writeCompressed(uint8_t *buf) const {
  buf += outSecOff;
  uint8_t *orig = buf; // kept only for the debug total below
  (void)orig;

  const uint8_t *secStart = file->codeSection->Content.data();
  const uint8_t *funcStart = secStart + getInputSectionOffset();
  const uint8_t *end = funcStart + function->Size;
  uint64_t tombstone = getTombstone();
  // Skip the input function's ULEB size field; `count` receives the number
  // of bytes that field occupied.
  uint32_t count;
  decodeULEB128(funcStart, &count);
  funcStart += count;

  LLVM_DEBUG(dbgs() << "write func: " << getName() << "\n");
  // Emit the new (compressed) body size computed by calculateSize().
  buf += encodeULEB128(compressedFuncSize, buf);
  const uint8_t *lastRelocEnd = funcStart;
  for (const WasmRelocation &rel : relocations) {
    // Copy the unrelocated bytes since the previous relocation verbatim.
    unsigned chunkSize = (secStart + rel.Offset) - lastRelocEnd;
    LLVM_DEBUG(dbgs() << "  write chunk: " << chunkSize << "\n");
    memcpy(buf, lastRelocEnd, chunkSize);
    buf += chunkSize;
    // Re-encode the relocated value without padding, and skip the padded
    // field in the input.
    buf += writeCompressedReloc(buf, rel,
                                file->calcNewValue(rel, tombstone, this));
    lastRelocEnd = secStart + rel.Offset + getRelocWidthPadded(rel);
  }

  // Copy the tail of the function after the last relocation.
  unsigned chunkSize = end - lastRelocEnd;
  LLVM_DEBUG(dbgs() << "  write final chunk: " << chunkSize << "\n");
  memcpy(buf, lastRelocEnd, chunkSize);
  LLVM_DEBUG(dbgs() << "  total: " << (buf + chunkSize - orig) << "\n");
}
334 
335 uint64_t InputChunk::getChunkOffset(uint64_t offset) const {
336   if (const auto *ms = dyn_cast<MergeInputChunk>(this)) {
337     LLVM_DEBUG(dbgs() << "getChunkOffset(merged): " << getName() << "\n");
338     LLVM_DEBUG(dbgs() << "offset: " << offset << "\n");
339     LLVM_DEBUG(dbgs() << "parentOffset: " << ms->getParentOffset(offset)
340                       << "\n");
341     assert(ms->parent);
342     return ms->parent->getChunkOffset(ms->getParentOffset(offset));
343   }
344   return outputSegmentOffset + offset;
345 }
346 
347 uint64_t InputChunk::getOffset(uint64_t offset) const {
348   return outSecOff + getChunkOffset(offset);
349 }
350 
351 uint64_t InputChunk::getVA(uint64_t offset) const {
352   return (outputSeg ? outputSeg->startVA : 0) + getChunkOffset(offset);
353 }
354 
355 // Generate code to apply relocations to the data section at runtime.
356 // This is only called when generating shared libaries (PIC) where address are
357 // not known at static link time.
358 void InputChunk::generateRelocationCode(raw_ostream &os) const {
359   LLVM_DEBUG(dbgs() << "generating runtime relocations: " << getName()
360                     << " count=" << relocations.size() << "\n");
361 
362   bool is64 = config->is64.getValueOr(false);
363   unsigned opcode_ptr_const = is64 ? WASM_OPCODE_I64_CONST
364                                    : WASM_OPCODE_I32_CONST;
365   unsigned opcode_ptr_add = is64 ? WASM_OPCODE_I64_ADD
366                                  : WASM_OPCODE_I32_ADD;
367 
368   uint64_t tombstone = getTombstone();
369   // TODO(sbc): Encode the relocations in the data section and write a loop
370   // here to apply them.
371   for (const WasmRelocation &rel : relocations) {
372     uint64_t offset = getVA(rel.Offset) - getInputSectionOffset();
373 
374     LLVM_DEBUG(dbgs() << "gen reloc: type=" << relocTypeToString(rel.Type)
375                       << " addend=" << rel.Addend << " index=" << rel.Index
376                       << " output offset=" << offset << "\n");
377 
378     // Get __memory_base
379     writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
380     writeUleb128(os, WasmSym::memoryBase->getGlobalIndex(), "memory_base");
381 
382     // Add the offset of the relocation
383     writeU8(os, opcode_ptr_const, "CONST");
384     writeSleb128(os, offset, "offset");
385     writeU8(os, opcode_ptr_add, "ADD");
386 
387     bool is64 = relocIs64(rel.Type);
388     unsigned opcode_reloc_const =
389         is64 ? WASM_OPCODE_I64_CONST : WASM_OPCODE_I32_CONST;
390     unsigned opcode_reloc_add =
391         is64 ? WASM_OPCODE_I64_ADD : WASM_OPCODE_I32_ADD;
392     unsigned opcode_reloc_store =
393         is64 ? WASM_OPCODE_I64_STORE : WASM_OPCODE_I32_STORE;
394 
395     Symbol *sym = file->getSymbol(rel);
396     // Now figure out what we want to store
397     if (sym->hasGOTIndex()) {
398       writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
399       writeUleb128(os, sym->getGOTIndex(), "global index");
400       if (rel.Addend) {
401         writeU8(os, opcode_reloc_const, "CONST");
402         writeSleb128(os, rel.Addend, "addend");
403         writeU8(os, opcode_reloc_add, "ADD");
404       }
405     } else {
406       const GlobalSymbol* baseSymbol = WasmSym::memoryBase;
407       if (rel.Type == R_WASM_TABLE_INDEX_I32 ||
408           rel.Type == R_WASM_TABLE_INDEX_I64)
409         baseSymbol = WasmSym::tableBase;
410       writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
411       writeUleb128(os, baseSymbol->getGlobalIndex(), "base");
412       writeU8(os, opcode_reloc_const, "CONST");
413       writeSleb128(os, file->calcNewValue(rel, tombstone, this), "offset");
414       writeU8(os, opcode_reloc_add, "ADD");
415     }
416 
417     // Store that value at the virtual address
418     writeU8(os, opcode_reloc_store, "I32_STORE");
419     writeUleb128(os, 2, "align");
420     writeUleb128(os, 0, "offset");
421   }
422 }
423 
424 // Split WASM_SEG_FLAG_STRINGS section. Such a section is a sequence of
425 // null-terminated strings.
426 void MergeInputChunk::splitStrings(ArrayRef<uint8_t> data) {
427   LLVM_DEBUG(llvm::dbgs() << "splitStrings\n");
428   size_t off = 0;
429   StringRef s = toStringRef(data);
430 
431   while (!s.empty()) {
432     size_t end = s.find(0);
433     if (end == StringRef::npos)
434       fatal(toString(this) + ": string is not null terminated");
435     size_t size = end + 1;
436 
437     pieces.emplace_back(off, xxHash64(s.substr(0, size)), true);
438     s = s.substr(size);
439     off += size;
440   }
441 }
442 
// This function is called after we obtain a complete list of input sections
// that need to be linked. This is responsible to split section contents
// into small chunks for further processing.
//
// Note that this function is called from parallelForEach. This must be
// thread-safe (i.e. no memory allocation from the pools).
void MergeInputChunk::splitIntoPieces() {
  // Must not have been split already.
  assert(pieces.empty());
  // As of now we only support WASM_SEG_FLAG_STRINGS but in the future we
  // could add other types of splitting (see ELF's splitIntoPieces).
  assert(flags & WASM_SEG_FLAG_STRINGS);
  splitStrings(data());
}
456 
457 SectionPiece *MergeInputChunk::getSectionPiece(uint64_t offset) {
458   if (this->data().size() <= offset)
459     fatal(toString(this) + ": offset is outside the section");
460 
461   // If Offset is not at beginning of a section piece, it is not in the map.
462   // In that case we need to  do a binary search of the original section piece
463   // vector.
464   auto it = partition_point(
465       pieces, [=](SectionPiece p) { return p.inputOff <= offset; });
466   return &it[-1];
467 }
468 
469 // Returns the offset in an output section for a given input offset.
470 // Because contents of a mergeable section is not contiguous in output,
471 // it is not just an addition to a base output offset.
472 uint64_t MergeInputChunk::getParentOffset(uint64_t offset) const {
473   // If Offset is not at beginning of a section piece, it is not in the map.
474   // In that case we need to search from the original section piece vector.
475   const SectionPiece *piece = getSectionPiece(offset);
476   uint64_t addend = offset - piece->inputOff;
477   return piece->outputOff + addend;
478 }
479 
480 void SyntheticMergedChunk::finalizeContents() {
481   // Add all string pieces to the string table builder to create section
482   // contents.
483   for (MergeInputChunk *sec : chunks)
484     for (size_t i = 0, e = sec->pieces.size(); i != e; ++i)
485       if (sec->pieces[i].live)
486         builder.add(sec->getData(i));
487 
488   // Fix the string table content. After this, the contents will never change.
489   builder.finalize();
490 
491   // finalize() fixed tail-optimized strings, so we can now get
492   // offsets of strings. Get an offset for each string and save it
493   // to a corresponding SectionPiece for easy access.
494   for (MergeInputChunk *sec : chunks)
495     for (size_t i = 0, e = sec->pieces.size(); i != e; ++i)
496       if (sec->pieces[i].live)
497         sec->pieces[i].outputOff = builder.getOffset(sec->getData(i));
498 }
499 
500 uint64_t InputSection::getTombstoneForSection(StringRef name) {
501   // When a function is not live we need to update relocations referring to it.
502   // If they occur in DWARF debug symbols, we want to change the pc of the
503   // function to -1 to avoid overlapping with a valid range. However for the
504   // debug_ranges and debug_loc sections that would conflict with the existing
505   // meaning of -1 so we use -2.
506   // Returning 0 means there is no tombstone value for this section, and relocation
507   // will just use the addend.
508   if (!name.startswith(".debug_"))
509     return 0;
510   if (name.equals(".debug_ranges") || name.equals(".debug_loc"))
511     return UINT64_C(-2);
512   return UINT64_C(-1);
513 }
514 
515 } // namespace wasm
516 } // namespace lld
517