//===- InputChunks.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "InputChunks.h"
#include "Config.h"
#include "OutputSegment.h"
#include "WriterUtils.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/LLVM.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/xxhash.h"

#define DEBUG_TYPE "lld"

using namespace llvm;
using namespace llvm::wasm;
using namespace llvm::support::endian;

namespace lld {
StringRef relocTypeToString(uint8_t relocType) {
  switch (relocType) {
#define WASM_RELOC(NAME, REL)                                                  \
  case REL:                                                                    \
    return #NAME;
#include "llvm/BinaryFormat/WasmRelocs.def"
#undef WASM_RELOC
  }
  llvm_unreachable("unknown reloc type");
}

bool relocIs64(uint8_t relocType) {
  switch (relocType) {
  case R_WASM_MEMORY_ADDR_LEB64:
  case R_WASM_MEMORY_ADDR_SLEB64:
  case R_WASM_MEMORY_ADDR_REL_SLEB64:
  case R_WASM_MEMORY_ADDR_I64:
    return true;
  default:
    return false;
  }
}

std::string toString(const wasm::InputChunk *c) {
  return (toString(c->file) + ":(" + c->getName() + ")").str();
}

namespace wasm {
StringRef InputChunk::getComdatName() const {
  uint32_t index = getComdat();
  if (index == UINT32_MAX)
    return StringRef();
  return file->getWasmObj()->linkingData().Comdats[index];
}

uint32_t InputChunk::getSize() const {
  if (const auto *ms = dyn_cast<SyntheticMergedChunk>(this))
    return ms->builder.getSize();

  if (const auto *f = dyn_cast<InputFunction>(this)) {
    if (config->compressRelocations && f->file) {
      return f->getCompressedSize();
    }
  }

  return data().size();
}

uint32_t InputChunk::getInputSize() const {
  if (const auto *f = dyn_cast<InputFunction>(this))
    return f->function->Size;
  return getSize();
}

// Copy this input chunk to an mmap'ed output file and apply relocations.
void InputChunk::writeTo(uint8_t *buf) const {
  if (const auto *f = dyn_cast<InputFunction>(this)) {
    if (file && config->compressRelocations)
      return f->writeCompressed(buf);
  } else if (const auto *ms = dyn_cast<SyntheticMergedChunk>(this)) {
    ms->builder.write(buf + outSecOff);
    // Apply relocations
    ms->relocate(buf + outSecOff);
    return;
  }

  // Copy contents
  memcpy(buf + outSecOff, data().data(), data().size());

  // Apply relocations
  relocate(buf + outSecOff);
}

void InputChunk::relocate(uint8_t *buf) const {
  if (relocations.empty())
    return;

  LLVM_DEBUG(dbgs() << "applying relocations: " << toString(this)
                    << " count=" << relocations.size() << "\n");
  int32_t inputSectionOffset = getInputSectionOffset();
  uint64_t tombstone = getTombstone();

  for (const WasmRelocation &rel : relocations) {
    uint8_t *loc = buf + rel.Offset - inputSectionOffset;
    auto value = file->calcNewValue(rel, tombstone, this);
    LLVM_DEBUG(dbgs() << "apply reloc: type=" << relocTypeToString(rel.Type));
    if (rel.Type != R_WASM_TYPE_INDEX_LEB)
      LLVM_DEBUG(dbgs() << " sym=" << file->getSymbols()[rel.Index]->getName());
    LLVM_DEBUG(dbgs() << " addend=" << rel.Addend << " index=" << rel.Index
                      << " value=" << value << " offset=" << rel.Offset
                      << "\n");

    switch (rel.Type) {
    case R_WASM_TYPE_INDEX_LEB:
    case R_WASM_FUNCTION_INDEX_LEB:
    case R_WASM_GLOBAL_INDEX_LEB:
    case R_WASM_EVENT_INDEX_LEB:
    case R_WASM_MEMORY_ADDR_LEB:
    case R_WASM_TABLE_NUMBER_LEB:
      encodeULEB128(value, loc, 5);
      break;
    case R_WASM_MEMORY_ADDR_LEB64:
      encodeULEB128(value, loc, 10);
      break;
    case R_WASM_TABLE_INDEX_SLEB:
    case R_WASM_TABLE_INDEX_REL_SLEB:
    case R_WASM_MEMORY_ADDR_SLEB:
    case R_WASM_MEMORY_ADDR_REL_SLEB:
    case R_WASM_MEMORY_ADDR_TLS_SLEB:
      encodeSLEB128(static_cast<int32_t>(value), loc, 5);
      break;
    case R_WASM_TABLE_INDEX_SLEB64:
    case R_WASM_TABLE_INDEX_REL_SLEB64:
    case R_WASM_MEMORY_ADDR_SLEB64:
    case R_WASM_MEMORY_ADDR_REL_SLEB64:
      encodeSLEB128(static_cast<int64_t>(value), loc, 10);
      break;
    case R_WASM_TABLE_INDEX_I32:
    case R_WASM_MEMORY_ADDR_I32:
    case R_WASM_FUNCTION_OFFSET_I32:
    case R_WASM_SECTION_OFFSET_I32:
    case R_WASM_GLOBAL_INDEX_I32:
    case R_WASM_MEMORY_ADDR_LOCREL_I32:
      write32le(loc, value);
      break;
    case R_WASM_TABLE_INDEX_I64:
    case R_WASM_MEMORY_ADDR_I64:
    case R_WASM_FUNCTION_OFFSET_I64:
      write64le(loc, value);
      break;
    default:
      llvm_unreachable("unknown relocation type");
    }
  }
}

// Copy relocation entries to a given output stream.
// This function is used only when a user passes "-r". For a regular link,
// we consume relocations instead of copying them to an output file.
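// Each entry is encoded as: type (ULEB128), offset (ULEB128), symbol or type
// index (ULEB128), and, for relocation types that carry one, an addend
// (SLEB128).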
void InputChunk::writeRelocations(raw_ostream &os) const {
  if (relocations.empty())
    return;

  int32_t off = outSecOff - getInputSectionOffset();
  LLVM_DEBUG(dbgs() << "writeRelocations: " << file->getName()
                    << " offset=" << Twine(off) << "\n");

  for (const WasmRelocation &rel : relocations) {
    writeUleb128(os, rel.Type, "reloc type");
    writeUleb128(os, rel.Offset + off, "reloc offset");
    writeUleb128(os, file->calcNewIndex(rel), "reloc index");

    if (relocTypeHasAddend(rel.Type))
      writeSleb128(os, file->calcNewAddend(rel), "reloc addend");
  }
}

uint64_t InputChunk::getTombstone() const {
  if (const auto *s = dyn_cast<InputSection>(this)) {
    return s->tombstoneValue;
  }

  return 0;
}

void InputFunction::setFunctionIndex(uint32_t index) {
  LLVM_DEBUG(dbgs() << "InputFunction::setFunctionIndex: " << getName()
                    << " -> " << index << "\n");
  assert(!hasFunctionIndex());
  functionIndex = index;
}

void InputFunction::setTableIndex(uint32_t index) {
  LLVM_DEBUG(dbgs() << "InputFunction::setTableIndex: " << getName() << " -> "
                    << index << "\n");
  assert(!hasTableIndex());
  tableIndex = index;
}

// Write a relocation value without padding and return the number of bytes
// written.
static unsigned writeCompressedReloc(uint8_t *buf, const WasmRelocation &rel,
                                     uint64_t value) {
  switch (rel.Type) {
  case R_WASM_TYPE_INDEX_LEB:
  case R_WASM_FUNCTION_INDEX_LEB:
  case R_WASM_GLOBAL_INDEX_LEB:
  case R_WASM_EVENT_INDEX_LEB:
  case R_WASM_MEMORY_ADDR_LEB:
  case R_WASM_MEMORY_ADDR_LEB64:
  case R_WASM_TABLE_NUMBER_LEB:
    return encodeULEB128(value, buf);
  case R_WASM_TABLE_INDEX_SLEB:
  case R_WASM_TABLE_INDEX_SLEB64:
  case R_WASM_MEMORY_ADDR_SLEB:
  case R_WASM_MEMORY_ADDR_SLEB64:
    return encodeSLEB128(static_cast<int64_t>(value), buf);
  default:
    llvm_unreachable("unexpected relocation type");
  }
}

static unsigned getRelocWidthPadded(const WasmRelocation &rel) {
  switch (rel.Type) {
  case R_WASM_TYPE_INDEX_LEB:
  case R_WASM_FUNCTION_INDEX_LEB:
  case R_WASM_GLOBAL_INDEX_LEB:
  case R_WASM_EVENT_INDEX_LEB:
  case R_WASM_MEMORY_ADDR_LEB:
  case R_WASM_TABLE_NUMBER_LEB:
  case R_WASM_TABLE_INDEX_SLEB:
  case R_WASM_MEMORY_ADDR_SLEB:
    return 5;
  case R_WASM_TABLE_INDEX_SLEB64:
  case R_WASM_MEMORY_ADDR_LEB64:
  case R_WASM_MEMORY_ADDR_SLEB64:
    return 10;
  default:
    llvm_unreachable("unexpected relocation type");
  }
}

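// Return the number of bytes needed to encode |value| for the given
// relocation without padding, by performing a trial encode into a scratch
// buffer.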
static unsigned getRelocWidth(const WasmRelocation &rel, uint64_t value) {
  uint8_t buf[10];
  return writeCompressedReloc(buf, rel, value);
}

// Relocations of type LEB and SLEB in the code section are padded to 5 bytes
// so that a fast linker can blindly overwrite them without needing to worry
// about the number of bytes needed to encode the values.
// However, for optimal output, the code section can be compressed to remove
// this padding when producing non-relocatable output.
// In this case we need to perform a size calculation based on the value at
// each relocation.  At best we end up saving 4 bytes for each relocation
// entry.
//
// This function only computes the final output size.  It must be called
// before getSize() is used to calculate the layout of the code section.
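// For example, a function index of 3 is stored padded as the 5-byte ULEB128
// sequence 0x83 0x80 0x80 0x80 0x00; once relocations no longer need to be
// applied in place, it compresses to the single byte 0x03, saving 4 bytes.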
void InputFunction::calculateSize() {
  if (!file || !config->compressRelocations)
    return;

  LLVM_DEBUG(dbgs() << "calculateSize: " << getName() << "\n");

  const uint8_t *secStart = file->codeSection->Content.data();
  const uint8_t *funcStart = secStart + getInputSectionOffset();
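  // Decode the LEB-encoded function-size field; we only need the number of
  // bytes it occupies (returned via decodeULEB128's second argument), not
  // the value itself.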
  uint32_t functionSizeLength;
  decodeULEB128(funcStart, &functionSizeLength);

  uint32_t start = getInputSectionOffset();
  uint32_t end = start + function->Size;

  uint64_t tombstone = getTombstone();

  uint32_t lastRelocEnd = start + functionSizeLength;
  for (const WasmRelocation &rel : relocations) {
    LLVM_DEBUG(dbgs() << "  region: " << (rel.Offset - lastRelocEnd) << "\n");
    compressedFuncSize += rel.Offset - lastRelocEnd;
    compressedFuncSize +=
        getRelocWidth(rel, file->calcNewValue(rel, tombstone, this));
    lastRelocEnd = rel.Offset + getRelocWidthPadded(rel);
  }
  LLVM_DEBUG(dbgs() << "  final region: " << (end - lastRelocEnd) << "\n");
  compressedFuncSize += end - lastRelocEnd;

  // Now that we know how long the resulting function is, we can add the
  // encoding of its length.
  uint8_t buf[5];
  compressedSize = compressedFuncSize + encodeULEB128(compressedFuncSize, buf);

  LLVM_DEBUG(dbgs() << "  calculateSize orig: " << function->Size << "\n");
  LLVM_DEBUG(dbgs() << "  calculateSize  new: " << compressedSize << "\n");
}

// Override the default writeTo method so that we can (optionally) write the
// compressed version of the function.
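// The output is the new (unpadded) function-size field followed by the
// original bytes, with each padded relocation site replaced by its compact
// encoding:
//   [size LEB] [bytes...] [reloc] [bytes...] [reloc] ... [bytes...]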
void InputFunction::writeCompressed(uint8_t *buf) const {
  buf += outSecOff;
  uint8_t *orig = buf;
  (void)orig;

  const uint8_t *secStart = file->codeSection->Content.data();
  const uint8_t *funcStart = secStart + getInputSectionOffset();
  const uint8_t *end = funcStart + function->Size;
  uint64_t tombstone = getTombstone();
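  // Skip over the original function-size field; |count| receives the number
  // of bytes it occupies, not the function size itself.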
  uint32_t count;
  decodeULEB128(funcStart, &count);
  funcStart += count;

  LLVM_DEBUG(dbgs() << "write func: " << getName() << "\n");
  buf += encodeULEB128(compressedFuncSize, buf);
  const uint8_t *lastRelocEnd = funcStart;
  for (const WasmRelocation &rel : relocations) {
    unsigned chunkSize = (secStart + rel.Offset) - lastRelocEnd;
    LLVM_DEBUG(dbgs() << "  write chunk: " << chunkSize << "\n");
    memcpy(buf, lastRelocEnd, chunkSize);
    buf += chunkSize;
    buf += writeCompressedReloc(buf, rel,
                                file->calcNewValue(rel, tombstone, this));
    lastRelocEnd = secStart + rel.Offset + getRelocWidthPadded(rel);
  }

  unsigned chunkSize = end - lastRelocEnd;
  LLVM_DEBUG(dbgs() << "  write final chunk: " << chunkSize << "\n");
  memcpy(buf, lastRelocEnd, chunkSize);
  LLVM_DEBUG(dbgs() << "  total: " << (buf + chunkSize - orig) << "\n");
}

uint64_t InputChunk::getChunkOffset(uint64_t offset) const {
  if (const auto *ms = dyn_cast<MergeInputChunk>(this)) {
    LLVM_DEBUG(dbgs() << "getChunkOffset(merged): " << getName() << "\n");
    LLVM_DEBUG(dbgs() << "offset: " << offset << "\n");
    LLVM_DEBUG(dbgs() << "parentOffset: " << ms->getParentOffset(offset)
                      << "\n");
    assert(ms->parent);
    return ms->parent->getChunkOffset(ms->getParentOffset(offset));
  }
  return outputSegmentOffset + offset;
}

uint64_t InputChunk::getOffset(uint64_t offset) const {
  return outSecOff + getChunkOffset(offset);
}

uint64_t InputChunk::getVA(uint64_t offset) const {
  return (outputSeg ? outputSeg->startVA : 0) + getChunkOffset(offset);
}

// Generate code to apply relocations to the data section at runtime.
// This is only called when generating shared libraries (PIC), where addresses
// are not known at static link time.
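// For each relocation the emitted code is roughly:
//   global.get __memory_base   ;; runtime address the data was loaded at
//   iNN.const <offset>         ;; location to patch, relative to __memory_base
//   iNN.add
//   <push the value: either a GOT global, or a base global plus a constant>
//   iNN.store                  ;; alignment immediate 2, offset immediate 0
// where iNN is i32 or i64 depending on config->is64 and the relocation type.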
void InputChunk::generateRelocationCode(raw_ostream &os) const {
  LLVM_DEBUG(dbgs() << "generating runtime relocations: " << getName()
                    << " count=" << relocations.size() << "\n");

  unsigned opcode_ptr_const = config->is64.getValueOr(false)
                                  ? WASM_OPCODE_I64_CONST
                                  : WASM_OPCODE_I32_CONST;
  unsigned opcode_ptr_add = config->is64.getValueOr(false)
                                ? WASM_OPCODE_I64_ADD
                                : WASM_OPCODE_I32_ADD;

  uint64_t tombstone = getTombstone();
  // TODO(sbc): Encode the relocations in the data section and write a loop
  // here to apply them.
  for (const WasmRelocation &rel : relocations) {
    uint64_t offset = getVA(rel.Offset) - getInputSectionOffset();

    LLVM_DEBUG(dbgs() << "gen reloc: type=" << relocTypeToString(rel.Type)
                      << " addend=" << rel.Addend << " index=" << rel.Index
                      << " output offset=" << offset << "\n");

    // Get __memory_base
    writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
    writeUleb128(os, WasmSym::memoryBase->getGlobalIndex(), "memory_base");

    // Add the offset of the relocation
    writeU8(os, opcode_ptr_const, "CONST");
    writeSleb128(os, offset, "offset");
    writeU8(os, opcode_ptr_add, "ADD");

    bool is64 = relocIs64(rel.Type);
    unsigned opcode_reloc_const =
        is64 ? WASM_OPCODE_I64_CONST : WASM_OPCODE_I32_CONST;
    unsigned opcode_reloc_add =
        is64 ? WASM_OPCODE_I64_ADD : WASM_OPCODE_I32_ADD;
    unsigned opcode_reloc_store =
        is64 ? WASM_OPCODE_I64_STORE : WASM_OPCODE_I32_STORE;

    Symbol *sym = file->getSymbol(rel);
    // Now figure out what we want to store
    if (sym->hasGOTIndex()) {
      writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
      writeUleb128(os, sym->getGOTIndex(), "global index");
      if (rel.Addend) {
        writeU8(os, opcode_reloc_const, "CONST");
        writeSleb128(os, rel.Addend, "addend");
        writeU8(os, opcode_reloc_add, "ADD");
      }
    } else {
      const GlobalSymbol *baseSymbol = WasmSym::memoryBase;
      if (rel.Type == R_WASM_TABLE_INDEX_I32 ||
          rel.Type == R_WASM_TABLE_INDEX_I64)
        baseSymbol = WasmSym::tableBase;
      writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
      writeUleb128(os, baseSymbol->getGlobalIndex(), "base");
      writeU8(os, opcode_reloc_const, "CONST");
      writeSleb128(os, file->calcNewValue(rel, tombstone, this), "offset");
      writeU8(os, opcode_reloc_add, "ADD");
    }

    // Store that value at the virtual address
    writeU8(os, opcode_reloc_store, "I32_STORE");
    writeUleb128(os, 2, "align");
    writeUleb128(os, 0, "offset");
  }
}

// Split WASM_SEG_FLAG_STRINGS section. Such a section is a sequence of
// null-terminated strings.
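// For example, the contents "foo\0bar\0" split into two pieces at input
// offsets 0 and 4; each piece is hashed (including its null terminator) so
// that identical strings can later be deduplicated.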
void MergeInputChunk::splitStrings(ArrayRef<uint8_t> data) {
  LLVM_DEBUG(llvm::dbgs() << "splitStrings\n");
  size_t off = 0;
  StringRef s = toStringRef(data);

  while (!s.empty()) {
    size_t end = s.find(0);
    if (end == StringRef::npos)
      fatal(toString(this) + ": string is not null terminated");
    size_t size = end + 1;

    pieces.emplace_back(off, xxHash64(s.substr(0, size)), true);
    s = s.substr(size);
    off += size;
  }
}

// This function is called after we obtain a complete list of input sections
// that need to be linked. It is responsible for splitting section contents
// into small chunks for further processing.
//
// Note that this function is called from parallelForEach, so it must be
// thread-safe (i.e. no memory allocation from the pools).
void MergeInputChunk::splitIntoPieces() {
  assert(pieces.empty());
  // As of now we only support WASM_SEG_FLAG_STRINGS, but in the future we
  // could add other types of splitting (see ELF's splitIntoPieces).
  assert(flags & WASM_SEG_FLAG_STRINGS);
  splitStrings(data());
}

SectionPiece *MergeInputChunk::getSectionPiece(uint64_t offset) {
  if (this->data().size() <= offset)
    fatal(toString(this) + ": offset is outside the section");

  // If the offset is not at the beginning of a section piece, we need to do
  // a binary search of the section piece vector to find the piece that
  // contains it.
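  // partition_point returns the first piece whose inputOff is greater than
  // the requested offset, so the containing piece is the one immediately
  // before it.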
  auto it = partition_point(
      pieces, [=](SectionPiece p) { return p.inputOff <= offset; });
  return &it[-1];
}

// Returns the offset in an output section for a given input offset.
// Because the contents of a mergeable section are not contiguous in the
// output, the result is not simply the input offset plus a base output
// offset.
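// For example, if the piece starting at input offset 8 was placed at output
// offset 3 within the parent chunk, then getParentOffset(10) returns
// 3 + (10 - 8) = 5.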
uint64_t MergeInputChunk::getParentOffset(uint64_t offset) const {
  // Find the piece that contains the given offset, then add the offset's
  // distance into that piece to the piece's output offset.
  const SectionPiece *piece = getSectionPiece(offset);
  uint64_t addend = offset - piece->inputOff;
  return piece->outputOff + addend;
}

477 
478 void SyntheticMergedChunk::finalizeContents() {
479   // Add all string pieces to the string table builder to create section
480   // contents.
481   for (MergeInputChunk *sec : chunks)
482     for (size_t i = 0, e = sec->pieces.size(); i != e; ++i)
483       if (sec->pieces[i].live)
484         builder.add(sec->getData(i));
485 
486   // Fix the string table content. After this, the contents will never change.
487   builder.finalize();
488 
489   // finalize() fixed tail-optimized strings, so we can now get
490   // offsets of strings. Get an offset for each string and save it
491   // to a corresponding SectionPiece for easy access.
492   for (MergeInputChunk *sec : chunks)
493     for (size_t i = 0, e = sec->pieces.size(); i != e; ++i)
494       if (sec->pieces[i].live)
495         sec->pieces[i].outputOff = builder.getOffset(sec->getData(i));
496 }
497 
uint64_t InputSection::getTombstoneForSection(StringRef name) {
  // When a function is not live, we need to update relocations referring to
  // it. If they occur in DWARF debug sections, we want to change the PC of
  // the function to -1 to avoid overlapping with a valid range. However, for
  // the .debug_ranges and .debug_loc sections that would conflict with the
  // existing meaning of -1 (a base address selection entry), so we use -2
  // there instead.
  // Returning 0 means there is no tombstone value for this section, and the
  // relocation will just use the addend.
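  // For example, a relocation against a dead function in .debug_line resolves
  // to -1, while the same relocation in .debug_ranges resolves to -2.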
  if (!name.startswith(".debug_"))
    return 0;
  if (name.equals(".debug_ranges") || name.equals(".debug_loc"))
    return UINT64_C(-2);
  return UINT64_C(-1);
}


} // namespace wasm
} // namespace lld