1 //===- InputChunks.cpp ----------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "InputChunks.h"
10 #include "Config.h"
11 #include "OutputSegment.h"
12 #include "WriterUtils.h"
13 #include "lld/Common/ErrorHandler.h"
14 #include "lld/Common/LLVM.h"
15 #include "llvm/Support/LEB128.h"
16 #include "llvm/Support/xxhash.h"
17 
18 #define DEBUG_TYPE "lld"
19 
20 using namespace llvm;
21 using namespace llvm::wasm;
22 using namespace llvm::support::endian;
23 
24 namespace lld {
// Returns the symbolic name of a wasm relocation type (e.g.
// "R_WASM_FUNCTION_INDEX_LEB").  The case list is generated by expanding the
// WASM_RELOC X-macro over WasmRelocs.def, so it stays in sync with LLVM's
// relocation definitions automatically.
StringRef relocTypeToString(uint8_t relocType) {
  switch (relocType) {
#define WASM_RELOC(NAME, REL)                                                  \
  case REL:                                                                    \
    return #NAME;
#include "llvm/BinaryFormat/WasmRelocs.def"
#undef WASM_RELOC
  }
  llvm_unreachable("unknown reloc type");
}
35 
36 bool relocIs64(uint8_t relocType) {
37   switch (relocType) {
38   case R_WASM_MEMORY_ADDR_LEB64:
39   case R_WASM_MEMORY_ADDR_SLEB64:
40   case R_WASM_MEMORY_ADDR_REL_SLEB64:
41   case R_WASM_MEMORY_ADDR_I64:
42     return true;
43   default:
44     return false;
45   }
46 }
47 
// Builds a human-readable description of an input chunk for diagnostics,
// formatted as "<object file>:(<chunk name>)".
std::string toString(const wasm::InputChunk *c) {
  return (toString(c->file) + ":(" + c->getName() + ")").str();
}
51 
52 namespace wasm {
53 StringRef InputChunk::getComdatName() const {
54   uint32_t index = getComdat();
55   if (index == UINT32_MAX)
56     return StringRef();
57   return file->getWasmObj()->linkingData().Comdats[index];
58 }
59 
60 // Copy this input chunk to an mmap'ed output file and apply relocations.
61 void InputChunk::writeTo(uint8_t *buf) const {
62   // Copy contents
63   memcpy(buf + outSecOff, data().data(), data().size());
64 
65   // Apply relocations
66   relocate(buf + outSecOff);
67 }
68 
// Apply all relocations recorded for this chunk to the output buffer `buf`
// (which points at this chunk's location in the output section).  Each
// relocation's final value is computed by the owning object file; dead
// symbols may resolve to the section's tombstone value instead.
void InputChunk::relocate(uint8_t *buf) const {
  if (relocations.empty())
    return;

  LLVM_DEBUG(dbgs() << "applying relocations: " << toString(this)
                    << " count=" << relocations.size() << "\n");
  // Relocation offsets are relative to the start of the input section, so
  // subtract this chunk's offset within it to get a buffer-relative offset.
  int32_t inputSectionOffset = getInputSectionOffset();
  auto tombstone = getTombstone();

  for (const WasmRelocation &rel : relocations) {
    uint8_t *loc = buf + rel.Offset - inputSectionOffset;
    auto value = file->calcNewValue(rel, tombstone, this);
    LLVM_DEBUG(dbgs() << "apply reloc: type=" << relocTypeToString(rel.Type));
    // TYPE_INDEX_LEB relocations reference the type table, not a symbol,
    // so there is no symbol name to print for them.
    if (rel.Type != R_WASM_TYPE_INDEX_LEB)
      LLVM_DEBUG(dbgs() << " sym=" << file->getSymbols()[rel.Index]->getName());
    LLVM_DEBUG(dbgs() << " addend=" << rel.Addend << " index=" << rel.Index
                      << " value=" << value << " offset=" << rel.Offset
                      << "\n");

    // LEB/SLEB patches are written padded (5 bytes for 32-bit, 10 for
    // 64-bit) so the encoded width never depends on the value.
    switch (rel.Type) {
    case R_WASM_TYPE_INDEX_LEB:
    case R_WASM_FUNCTION_INDEX_LEB:
    case R_WASM_GLOBAL_INDEX_LEB:
    case R_WASM_EVENT_INDEX_LEB:
    case R_WASM_MEMORY_ADDR_LEB:
    case R_WASM_TABLE_NUMBER_LEB:
      encodeULEB128(value, loc, 5);
      break;
    case R_WASM_MEMORY_ADDR_LEB64:
      encodeULEB128(value, loc, 10);
      break;
    case R_WASM_TABLE_INDEX_SLEB:
    case R_WASM_TABLE_INDEX_REL_SLEB:
    case R_WASM_MEMORY_ADDR_SLEB:
    case R_WASM_MEMORY_ADDR_REL_SLEB:
    case R_WASM_MEMORY_ADDR_TLS_SLEB:
      encodeSLEB128(static_cast<int32_t>(value), loc, 5);
      break;
    case R_WASM_TABLE_INDEX_SLEB64:
    case R_WASM_MEMORY_ADDR_SLEB64:
    case R_WASM_MEMORY_ADDR_REL_SLEB64:
      encodeSLEB128(static_cast<int64_t>(value), loc, 10);
      break;
    // Fixed-width relocations are plain little-endian stores.
    case R_WASM_TABLE_INDEX_I32:
    case R_WASM_MEMORY_ADDR_I32:
    case R_WASM_FUNCTION_OFFSET_I32:
    case R_WASM_SECTION_OFFSET_I32:
    case R_WASM_GLOBAL_INDEX_I32:
    case R_WASM_MEMORY_ADDR_LOCREL_I32:
      write32le(loc, value);
      break;
    case R_WASM_TABLE_INDEX_I64:
    case R_WASM_MEMORY_ADDR_I64:
    case R_WASM_FUNCTION_OFFSET_I64:
      write64le(loc, value);
      break;
    default:
      llvm_unreachable("unknown relocation type");
    }
  }
}
130 
// Copy relocation entries to a given output stream.
// This function is used only when a user passes "-r". For a regular link,
// we consume relocations instead of copying them to an output file.
void InputChunk::writeRelocations(raw_ostream &os) const {
  if (relocations.empty())
    return;

  // Translate input-section-relative offsets into output-section-relative
  // ones; `off` may be negative, hence the signed type.
  int32_t off = outSecOff - getInputSectionOffset();
  LLVM_DEBUG(dbgs() << "writeRelocations: " << file->getName()
                    << " offset=" << Twine(off) << "\n");

  // Each entry is: type, offset, index, and (for some types) an addend,
  // matching the wasm reloc section encoding.
  for (const WasmRelocation &rel : relocations) {
    writeUleb128(os, rel.Type, "reloc type");
    writeUleb128(os, rel.Offset + off, "reloc offset");
    writeUleb128(os, file->calcNewIndex(rel), "reloc index");

    if (relocTypeHasAddend(rel.Type))
      writeSleb128(os, file->calcNewAddend(rel), "reloc addend");
  }
}
151 
// Record this function's final index in the output function index space.
// May only be called once per function (asserted below).
void InputFunction::setFunctionIndex(uint32_t index) {
  LLVM_DEBUG(dbgs() << "InputFunction::setFunctionIndex: " << getName()
                    << " -> " << index << "\n");
  assert(!hasFunctionIndex());
  functionIndex = index;
}
158 
// Record this function's slot in the indirect function table.
// May only be called once per function (asserted below).
void InputFunction::setTableIndex(uint32_t index) {
  LLVM_DEBUG(dbgs() << "InputFunction::setTableIndex: " << getName() << " -> "
                    << index << "\n");
  assert(!hasTableIndex());
  tableIndex = index;
}
165 
// Write a relocation value without padding and return the number of bytes
// written.  Used when compressing the code section (--compress-relocations):
// only LEB/SLEB relocation types can be compressed; fixed-width (I32/I64)
// types never reach this function.
static unsigned writeCompressedReloc(uint8_t *buf, const WasmRelocation &rel,
                                     uint64_t value) {
  switch (rel.Type) {
  case R_WASM_TYPE_INDEX_LEB:
  case R_WASM_FUNCTION_INDEX_LEB:
  case R_WASM_GLOBAL_INDEX_LEB:
  case R_WASM_EVENT_INDEX_LEB:
  case R_WASM_MEMORY_ADDR_LEB:
  case R_WASM_MEMORY_ADDR_LEB64:
  case R_WASM_TABLE_NUMBER_LEB:
    return encodeULEB128(value, buf);
  case R_WASM_TABLE_INDEX_SLEB:
  case R_WASM_TABLE_INDEX_SLEB64:
  case R_WASM_MEMORY_ADDR_SLEB:
  case R_WASM_MEMORY_ADDR_SLEB64:
    return encodeSLEB128(static_cast<int64_t>(value), buf);
  default:
    llvm_unreachable("unexpected relocation type");
  }
}
188 
// Returns the on-disk width of a relocation site as the compiler emitted it:
// padded LEB/SLEB encodings are always 5 bytes for 32-bit values and 10
// bytes for 64-bit values, regardless of the encoded value.
static unsigned getRelocWidthPadded(const WasmRelocation &rel) {
  switch (rel.Type) {
  case R_WASM_TYPE_INDEX_LEB:
  case R_WASM_FUNCTION_INDEX_LEB:
  case R_WASM_GLOBAL_INDEX_LEB:
  case R_WASM_EVENT_INDEX_LEB:
  case R_WASM_MEMORY_ADDR_LEB:
  case R_WASM_TABLE_NUMBER_LEB:
  case R_WASM_TABLE_INDEX_SLEB:
  case R_WASM_MEMORY_ADDR_SLEB:
    return 5;
  case R_WASM_TABLE_INDEX_SLEB64:
  case R_WASM_MEMORY_ADDR_LEB64:
  case R_WASM_MEMORY_ADDR_SLEB64:
    return 10;
  default:
    llvm_unreachable("unexpected relocation type");
  }
}
208 
// Returns the minimal (unpadded) encoded width of `value` for this
// relocation type, by encoding it into a scratch buffer and measuring.
// 10 bytes is the maximum LEB128 length for a 64-bit value.
static unsigned getRelocWidth(const WasmRelocation &rel, uint64_t value) {
  uint8_t buf[10];
  return writeCompressedReloc(buf, rel, value);
}
213 
// Relocations of type LEB and SLEB in the code section are padded to 5 bytes
// so that a fast linker can blindly overwrite them without needing to worry
// about the number of bytes needed to encode the values.
// However, for optimal output the code section can be compressed to remove
// the padding then outputting non-relocatable files.
// In this case we need to perform a size calculation based on the value at each
// relocation.  At best we end up saving 4 bytes for each relocation entry.
//
// This function only computes the final output size.  It must be called
// before getSize() is used to calculate the layout of the code section.
void InputFunction::calculateSize() {
  // Compression only applies when linking from object files with
  // --compress-relocations; otherwise the padded size is used as-is.
  if (!file || !config->compressRelocations)
    return;

  LLVM_DEBUG(dbgs() << "calculateSize: " << getName() << "\n");

  const uint8_t *secStart = file->codeSection->Content.data();
  const uint8_t *funcStart = secStart + getInputSectionOffset();
  // A function body starts with a ULEB-encoded body size; we only need the
  // number of bytes that size field occupies, not its value.
  uint32_t functionSizeLength;
  decodeULEB128(funcStart, &functionSizeLength);

  uint32_t start = getInputSectionOffset();
  uint32_t end = start + function->Size;

  auto tombstone = getTombstone();

  // Walk the relocations in offset order, summing the unmodified bytes
  // between relocation sites plus the compressed width of each site.
  uint32_t lastRelocEnd = start + functionSizeLength;
  for (const WasmRelocation &rel : relocations) {
    LLVM_DEBUG(dbgs() << "  region: " << (rel.Offset - lastRelocEnd) << "\n");
    compressedFuncSize += rel.Offset - lastRelocEnd;
    compressedFuncSize +=
        getRelocWidth(rel, file->calcNewValue(rel, tombstone, this));
    lastRelocEnd = rel.Offset + getRelocWidthPadded(rel);
  }
  LLVM_DEBUG(dbgs() << "  final region: " << (end - lastRelocEnd) << "\n");
  compressedFuncSize += end - lastRelocEnd;

  // Now we know how long the resulting function is we can add the encoding
  // of its length
  uint8_t buf[5];
  compressedSize = compressedFuncSize + encodeULEB128(compressedFuncSize, buf);

  LLVM_DEBUG(dbgs() << "  calculateSize orig: " << function->Size << "\n");
  LLVM_DEBUG(dbgs() << "  calculateSize  new: " << compressedSize << "\n");
}
259 
// Override the default writeTo method so that we can (optionally) write the
// compressed version of the function.
void InputFunction::writeTo(uint8_t *buf) const {
  // Without compression, fall back to a plain copy + relocate.
  if (!file || !config->compressRelocations)
    return InputChunk::writeTo(buf);

  buf += outSecOff;
  uint8_t *orig = buf;
  (void)orig;

  const uint8_t *secStart = file->codeSection->Content.data();
  const uint8_t *funcStart = secStart + getInputSectionOffset();
  const uint8_t *end = funcStart + function->Size;
  auto tombstone = getTombstone();
  // NOTE: `count` receives the byte length of the original function's
  // ULEB-encoded size field (the decoded value itself is discarded); we
  // skip past that field since we re-emit the size below.
  uint32_t count;
  decodeULEB128(funcStart, &count);
  funcStart += count;

  LLVM_DEBUG(dbgs() << "write func: " << getName() << "\n");
  // Emit the compressed body size (computed earlier by calculateSize()),
  // then alternate: copy unmodified bytes, write a compressed relocation.
  buf += encodeULEB128(compressedFuncSize, buf);
  const uint8_t *lastRelocEnd = funcStart;
  for (const WasmRelocation &rel : relocations) {
    unsigned chunkSize = (secStart + rel.Offset) - lastRelocEnd;
    LLVM_DEBUG(dbgs() << "  write chunk: " << chunkSize << "\n");
    memcpy(buf, lastRelocEnd, chunkSize);
    buf += chunkSize;
    buf += writeCompressedReloc(buf, rel,
                                file->calcNewValue(rel, tombstone, this));
    lastRelocEnd = secStart + rel.Offset + getRelocWidthPadded(rel);
  }

  // Copy the tail of the function after the last relocation site.
  unsigned chunkSize = end - lastRelocEnd;
  LLVM_DEBUG(dbgs() << "  write final chunk: " << chunkSize << "\n");
  memcpy(buf, lastRelocEnd, chunkSize);
  LLVM_DEBUG(dbgs() << "  total: " << (buf + chunkSize - orig) << "\n");
}
296 
// Translate an offset within this input segment into an offset within the
// output segment.  For merged (string-mergeable) segments the mapping is
// non-linear and is delegated to the parent synthetic segment.
uint64_t InputSegment::getOffset(uint64_t offset) const {
  if (const MergeInputSegment *ms = dyn_cast<MergeInputSegment>(this)) {
    LLVM_DEBUG(dbgs() << "getOffset(merged): " << getName() << "\n");
    LLVM_DEBUG(dbgs() << "offset: " << offset << "\n");
    LLVM_DEBUG(dbgs() << "parentOffset: " << ms->getParentOffset(offset)
                      << "\n");
    assert(ms->parent);
    return ms->parent->getOffset(ms->getParentOffset(offset));
  }
  return outputSegmentOffset + offset;
}
308 
309 uint64_t InputSegment::getVA(uint64_t offset) const {
310   return (outputSeg ? outputSeg->startVA : 0) + getOffset(offset);
311 }
312 
// Generate code to apply relocations to the data section at runtime.
// This is only called when generating shared libraries (PIC) where addresses
// are not known at static link time.
void InputSegment::generateRelocationCode(raw_ostream &os) const {
  LLVM_DEBUG(dbgs() << "generating runtime relocations: " << getName()
                    << " count=" << relocations.size() << "\n");

  // Pointer arithmetic uses i64 opcodes under wasm64, i32 otherwise.
  unsigned opcode_ptr_const = config->is64.getValueOr(false)
                                  ? WASM_OPCODE_I64_CONST
                                  : WASM_OPCODE_I32_CONST;
  unsigned opcode_ptr_add = config->is64.getValueOr(false)
                                ? WASM_OPCODE_I64_ADD
                                : WASM_OPCODE_I32_ADD;

  auto tombstone = getTombstone();
  // TODO(sbc): Encode the relocations in the data section and write a loop
  // here to apply them.
  for (const WasmRelocation &rel : relocations) {
    uint64_t offset = getVA(rel.Offset) - getInputSectionOffset();

    LLVM_DEBUG(dbgs() << "gen reloc: type=" << relocTypeToString(rel.Type)
                      << " addend=" << rel.Addend << " index=" << rel.Index
                      << " output offset=" << offset << "\n");

    // Compute the store address: __memory_base + offset.
    // Get __memory_base
    writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
    writeUleb128(os, WasmSym::memoryBase->getGlobalIndex(), "memory_base");

    // Add the offset of the relocation
    writeU8(os, opcode_ptr_const, "CONST");
    writeSleb128(os, offset, "offset");
    writeU8(os, opcode_ptr_add, "ADD");

    // The stored value's width follows the relocation type, independently
    // of the pointer width chosen above.
    bool is64 = relocIs64(rel.Type);
    unsigned opcode_reloc_const =
        is64 ? WASM_OPCODE_I64_CONST : WASM_OPCODE_I32_CONST;
    unsigned opcode_reloc_add =
        is64 ? WASM_OPCODE_I64_ADD : WASM_OPCODE_I32_ADD;
    unsigned opcode_reloc_store =
        is64 ? WASM_OPCODE_I64_STORE : WASM_OPCODE_I32_STORE;

    Symbol *sym = file->getSymbol(rel);
    // Now figure out what we want to store
    if (sym->hasGOTIndex()) {
      // GOT-based symbol: load its address from the GOT global, plus any
      // addend.
      writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
      writeUleb128(os, sym->getGOTIndex(), "global index");
      if (rel.Addend) {
        writeU8(os, opcode_reloc_const, "CONST");
        writeSleb128(os, rel.Addend, "addend");
        writeU8(os, opcode_reloc_add, "ADD");
      }
    } else {
      // Locally-defined symbol: base global (__table_base for table-index
      // relocations, __memory_base otherwise) plus the statically computed
      // relative value.
      const GlobalSymbol* baseSymbol = WasmSym::memoryBase;
      if (rel.Type == R_WASM_TABLE_INDEX_I32 ||
          rel.Type == R_WASM_TABLE_INDEX_I64)
        baseSymbol = WasmSym::tableBase;
      writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
      writeUleb128(os, baseSymbol->getGlobalIndex(), "base");
      writeU8(os, opcode_reloc_const, "CONST");
      writeSleb128(os, file->calcNewValue(rel, tombstone, this), "offset");
      writeU8(os, opcode_reloc_add, "ADD");
    }

    // Store that value at the virtual address
    writeU8(os, opcode_reloc_store, "I32_STORE");
    writeUleb128(os, 2, "align");
    writeUleb128(os, 0, "offset");
  }
}
382 
383 // Split WASM_SEG_FLAG_STRINGS section. Such a section is a sequence of
384 // null-terminated strings.
385 void MergeInputSegment::splitStrings(ArrayRef<uint8_t> data) {
386   LLVM_DEBUG(llvm::dbgs() << "splitStrings\n");
387   size_t off = 0;
388   StringRef s = toStringRef(data);
389 
390   while (!s.empty()) {
391     size_t end = s.find(0);
392     if (end == StringRef::npos)
393       fatal(toString(this) + ": string is not null terminated");
394     size_t size = end + 1;
395 
396     pieces.emplace_back(off, xxHash64(s.substr(0, size)), true);
397     s = s.substr(size);
398     off += size;
399   }
400 }
401 
// This function is called after we obtain a complete list of input sections
// that need to be linked. It is responsible for splitting section contents
// into small chunks for further processing.
//
// Note that this function is called from parallelForEach. This must be
// thread-safe (i.e. no memory allocation from the pools).
void MergeInputSegment::splitIntoPieces() {
  assert(pieces.empty());
  // As of now we only support WASM_SEG_FLAG_STRINGS but in the future we
  // could add other types of splitting (see ELF's splitIntoPieces).
  assert(segment->Data.LinkingFlags & WASM_SEG_FLAG_STRINGS);
  splitStrings(data());
}
415 
// Returns the piece containing the given input-segment offset.
SegmentPiece *MergeInputSegment::getSegmentPiece(uint64_t offset) {
  if (this->data().size() <= offset)
    fatal(toString(this) + ": offset is outside the section");

  // If Offset is not at beginning of a section piece, it is not in the map.
  // In that case we need to do a binary search of the original section piece
  // vector.  partition_point returns the first piece starting AFTER offset,
  // so it[-1] is the piece that contains it (pieces are sorted by inputOff).
  auto it = partition_point(
      pieces, [=](SegmentPiece p) { return p.inputOff <= offset; });
  return &it[-1];
}
427 
428 // Returns the offset in an output section for a given input offset.
429 // Because contents of a mergeable section is not contiguous in output,
430 // it is not just an addition to a base output offset.
431 uint64_t MergeInputSegment::getParentOffset(uint64_t offset) const {
432   // If Offset is not at beginning of a section piece, it is not in the map.
433   // In that case we need to search from the original section piece vector.
434   const SegmentPiece *piece = getSegmentPiece(offset);
435   uint64_t addend = offset - piece->inputOff;
436   return piece->outputOff + addend;
437 }
438 
// Size of the merged segment is whatever the string-table builder produced
// after deduplication/tail-merging (valid once finalizeContents() has run).
uint32_t SyntheticMergedDataSegment::getSize() const {
  return builder.getSize();
}
442 
443 void SyntheticMergedDataSegment::writeTo(uint8_t *buf) const {
444   builder.write(buf + outSecOff);
445 
446   // Apply relocations
447   relocate(buf + outSecOff);
448 }
449 
450 void SyntheticMergedDataSegment::finalizeContents() {
451   // Add all string pieces to the string table builder to create section
452   // contents.
453   for (MergeInputSegment *sec : segments)
454     for (size_t i = 0, e = sec->pieces.size(); i != e; ++i)
455       if (sec->pieces[i].live)
456         builder.add(sec->getData(i));
457 
458   // Fix the string table content. After this, the contents will never change.
459   builder.finalize();
460 
461   // finalize() fixed tail-optimized strings, so we can now get
462   // offsets of strings. Get an offset for each string and save it
463   // to a corresponding SectionPiece for easy access.
464   for (MergeInputSegment *sec : segments)
465     for (size_t i = 0, e = sec->pieces.size(); i != e; ++i)
466       if (sec->pieces[i].live)
467         sec->pieces[i].outputOff = builder.getOffset(sec->getData(i));
468 }
469 
470 uint64_t InputSection::getTombstoneForSection(StringRef name) {
471   // When a function is not live we need to update relocations referring to it.
472   // If they occur in DWARF debug symbols, we want to change the pc of the
473   // function to -1 to avoid overlapping with a valid range. However for the
474   // debug_ranges and debug_loc sections that would conflict with the existing
475   // meaning of -1 so we use -2.
476   // Returning 0 means there is no tombstone value for this section, and relocation
477   // will just use the addend.
478   if (!name.startswith(".debug_"))
479     return 0;
480   if (name.equals(".debug_ranges") || name.equals(".debug_loc"))
481     return UINT64_C(-2);
482   return UINT64_C(-1);
483 }
484 
485 } // namespace wasm
486 } // namespace lld
487