//===- InputChunks.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "InputChunks.h"
#include "Config.h"
#include "OutputSegment.h"
#include "WriterUtils.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/LLVM.h"
#include "llvm/Support/LEB128.h"

#define DEBUG_TYPE "lld"

using namespace llvm;
using namespace llvm::wasm;
using namespace llvm::support::endian;

namespace lld {
StringRef relocTypeToString(uint8_t relocType) {
  switch (relocType) {
#define WASM_RELOC(NAME, REL)                                                  \
  case REL:                                                                    \
    return #NAME;
#include "llvm/BinaryFormat/WasmRelocs.def"
#undef WASM_RELOC
  }
  llvm_unreachable("unknown reloc type");
}

bool relocIs64(uint8_t relocType) {
  switch (relocType) {
  case R_WASM_MEMORY_ADDR_LEB64:
  case R_WASM_MEMORY_ADDR_SLEB64:
  case R_WASM_MEMORY_ADDR_REL_SLEB64:
  case R_WASM_MEMORY_ADDR_I64:
    return true;
  default:
    return false;
  }
}

std::string toString(const wasm::InputChunk *c) {
  return (toString(c->file) + ":(" + c->getName() + ")").str();
}

namespace wasm {
StringRef InputChunk::getComdatName() const {
  uint32_t index = getComdat();
  if (index == UINT32_MAX)
    return StringRef();
  return file->getWasmObj()->linkingData().Comdats[index];
}

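// Sanity-check the values currently stored at relocation sites: decode each
// one and warn if it does not match the pre-relocation value the linker
// expects to find there.  Called from writeTo() in debug builds only.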
void InputChunk::verifyRelocTargets() const {
  for (const WasmRelocation &rel : relocations) {
    uint64_t existingValue;
    unsigned bytesRead = 0;
    unsigned paddedLEBWidth = 5;
    auto offset = rel.Offset - getInputSectionOffset();
    const uint8_t *loc = data().data() + offset;
    switch (rel.Type) {
    case R_WASM_TYPE_INDEX_LEB:
    case R_WASM_FUNCTION_INDEX_LEB:
    case R_WASM_GLOBAL_INDEX_LEB:
    case R_WASM_EVENT_INDEX_LEB:
    case R_WASM_MEMORY_ADDR_LEB:
    case R_WASM_TABLE_NUMBER_LEB:
      existingValue = decodeULEB128(loc, &bytesRead);
      break;
    case R_WASM_MEMORY_ADDR_LEB64:
      existingValue = decodeULEB128(loc, &bytesRead);
      paddedLEBWidth = 10;
      break;
    case R_WASM_TABLE_INDEX_SLEB:
    case R_WASM_TABLE_INDEX_REL_SLEB:
    case R_WASM_MEMORY_ADDR_SLEB:
    case R_WASM_MEMORY_ADDR_REL_SLEB:
    case R_WASM_MEMORY_ADDR_TLS_SLEB:
      existingValue = static_cast<uint64_t>(decodeSLEB128(loc, &bytesRead));
      break;
    case R_WASM_TABLE_INDEX_SLEB64:
    case R_WASM_MEMORY_ADDR_SLEB64:
    case R_WASM_MEMORY_ADDR_REL_SLEB64:
      existingValue = static_cast<uint64_t>(decodeSLEB128(loc, &bytesRead));
      paddedLEBWidth = 10;
      break;
    case R_WASM_TABLE_INDEX_I32:
    case R_WASM_MEMORY_ADDR_I32:
    case R_WASM_FUNCTION_OFFSET_I32:
    case R_WASM_SECTION_OFFSET_I32:
    case R_WASM_GLOBAL_INDEX_I32:
    case R_WASM_MEMORY_ADDR_LOCREL_I32:
      existingValue = read32le(loc);
      break;
    case R_WASM_TABLE_INDEX_I64:
    case R_WASM_MEMORY_ADDR_I64:
    case R_WASM_FUNCTION_OFFSET_I64:
      existingValue = read64le(loc);
      break;
    default:
      llvm_unreachable("unknown relocation type");
    }

    if (bytesRead && bytesRead != paddedLEBWidth)
      warn("expected LEB at relocation site to be 5/10-byte padded");

    if (rel.Type != R_WASM_GLOBAL_INDEX_LEB &&
        rel.Type != R_WASM_GLOBAL_INDEX_I32) {
      auto expectedValue = file->calcExpectedValue(rel);
      if (expectedValue != existingValue)
        warn(toString(this) + ": unexpected existing value for " +
             relocTypeToString(rel.Type) + ": existing=" +
             Twine(existingValue) + " expected=" + Twine(expectedValue));
    }
  }
}

// Copy this input chunk to an mmap'ed output file and apply relocations.
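// LEB-encoded targets are rewritten in place using the same padded width as
// the input, so offsets within the chunk are preserved.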
void InputChunk::writeTo(uint8_t *buf) const {
  // Copy contents
  memcpy(buf + outSecOff, data().data(), data().size());

  // Apply relocations
  if (relocations.empty())
    return;

#ifndef NDEBUG
  verifyRelocTargets();
#endif

  LLVM_DEBUG(dbgs() << "applying relocations: " << toString(this)
                    << " count=" << relocations.size() << "\n");
  int32_t off = outSecOff - getInputSectionOffset();
  auto tombstone = getTombstone();

  for (const WasmRelocation &rel : relocations) {
    uint8_t *loc = buf + rel.Offset + off;
    auto value = file->calcNewValue(rel, tombstone, this);
    LLVM_DEBUG(dbgs() << "apply reloc: type=" << relocTypeToString(rel.Type));
    if (rel.Type != R_WASM_TYPE_INDEX_LEB)
      LLVM_DEBUG(dbgs() << " sym=" << file->getSymbols()[rel.Index]->getName());
    LLVM_DEBUG(dbgs() << " addend=" << rel.Addend << " index=" << rel.Index
                      << " value=" << value << " offset=" << rel.Offset
                      << "\n");

    switch (rel.Type) {
    case R_WASM_TYPE_INDEX_LEB:
    case R_WASM_FUNCTION_INDEX_LEB:
    case R_WASM_GLOBAL_INDEX_LEB:
    case R_WASM_EVENT_INDEX_LEB:
    case R_WASM_MEMORY_ADDR_LEB:
    case R_WASM_TABLE_NUMBER_LEB:
      encodeULEB128(value, loc, 5);
      break;
    case R_WASM_MEMORY_ADDR_LEB64:
      encodeULEB128(value, loc, 10);
      break;
    case R_WASM_TABLE_INDEX_SLEB:
    case R_WASM_TABLE_INDEX_REL_SLEB:
    case R_WASM_MEMORY_ADDR_SLEB:
    case R_WASM_MEMORY_ADDR_REL_SLEB:
    case R_WASM_MEMORY_ADDR_TLS_SLEB:
      encodeSLEB128(static_cast<int32_t>(value), loc, 5);
      break;
    case R_WASM_TABLE_INDEX_SLEB64:
    case R_WASM_MEMORY_ADDR_SLEB64:
    case R_WASM_MEMORY_ADDR_REL_SLEB64:
      encodeSLEB128(static_cast<int64_t>(value), loc, 10);
      break;
    case R_WASM_TABLE_INDEX_I32:
    case R_WASM_MEMORY_ADDR_I32:
    case R_WASM_FUNCTION_OFFSET_I32:
    case R_WASM_SECTION_OFFSET_I32:
    case R_WASM_GLOBAL_INDEX_I32:
    case R_WASM_MEMORY_ADDR_LOCREL_I32:
      write32le(loc, value);
      break;
    case R_WASM_TABLE_INDEX_I64:
    case R_WASM_MEMORY_ADDR_I64:
    case R_WASM_FUNCTION_OFFSET_I64:
      write64le(loc, value);
      break;
    default:
      llvm_unreachable("unknown relocation type");
    }
  }
}

// Copy relocation entries to a given output stream.
// This function is used only when a user passes "-r". For a regular link,
// we consume relocations instead of copying them to an output file.
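// Each entry is written as a (type, offset, index) triple of ULEB128s,
// followed by an SLEB128 addend for relocation types that carry one.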
void InputChunk::writeRelocations(raw_ostream &os) const {
  if (relocations.empty())
    return;

  int32_t off = outSecOff - getInputSectionOffset();
  LLVM_DEBUG(dbgs() << "writeRelocations: " << file->getName()
                    << " offset=" << Twine(off) << "\n");

  for (const WasmRelocation &rel : relocations) {
    writeUleb128(os, rel.Type, "reloc type");
    writeUleb128(os, rel.Offset + off, "reloc offset");
    writeUleb128(os, file->calcNewIndex(rel), "reloc index");

    if (relocTypeHasAddend(rel.Type))
      writeSleb128(os, file->calcNewAddend(rel), "reloc addend");
  }
}

void InputFunction::setFunctionIndex(uint32_t index) {
  LLVM_DEBUG(dbgs() << "InputFunction::setFunctionIndex: " << getName()
                    << " -> " << index << "\n");
  assert(!hasFunctionIndex());
  functionIndex = index;
}

void InputFunction::setTableIndex(uint32_t index) {
  LLVM_DEBUG(dbgs() << "InputFunction::setTableIndex: " << getName() << " -> "
                    << index << "\n");
  assert(!hasTableIndex());
  tableIndex = index;
}

// Write a relocation value without padding and return the number of bytes
// written.
static unsigned writeCompressedReloc(uint8_t *buf, const WasmRelocation &rel,
                                     uint64_t value) {
  switch (rel.Type) {
  case R_WASM_TYPE_INDEX_LEB:
  case R_WASM_FUNCTION_INDEX_LEB:
  case R_WASM_GLOBAL_INDEX_LEB:
  case R_WASM_EVENT_INDEX_LEB:
  case R_WASM_MEMORY_ADDR_LEB:
  case R_WASM_MEMORY_ADDR_LEB64:
  case R_WASM_TABLE_NUMBER_LEB:
    return encodeULEB128(value, buf);
  case R_WASM_TABLE_INDEX_SLEB:
  case R_WASM_TABLE_INDEX_SLEB64:
  case R_WASM_MEMORY_ADDR_SLEB:
  case R_WASM_MEMORY_ADDR_SLEB64:
    return encodeSLEB128(static_cast<int64_t>(value), buf);
  default:
    llvm_unreachable("unexpected relocation type");
  }
}

static unsigned getRelocWidthPadded(const WasmRelocation &rel) {
  switch (rel.Type) {
  case R_WASM_TYPE_INDEX_LEB:
  case R_WASM_FUNCTION_INDEX_LEB:
  case R_WASM_GLOBAL_INDEX_LEB:
  case R_WASM_EVENT_INDEX_LEB:
  case R_WASM_MEMORY_ADDR_LEB:
  case R_WASM_TABLE_NUMBER_LEB:
  case R_WASM_TABLE_INDEX_SLEB:
  case R_WASM_MEMORY_ADDR_SLEB:
    return 5;
  case R_WASM_TABLE_INDEX_SLEB64:
  case R_WASM_MEMORY_ADDR_LEB64:
  case R_WASM_MEMORY_ADDR_SLEB64:
    return 10;
  default:
    llvm_unreachable("unexpected relocation type");
  }
}

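// Compute the number of bytes a relocation value will occupy once padding is
// removed, by encoding it into a scratch buffer.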
static unsigned getRelocWidth(const WasmRelocation &rel, uint64_t value) {
  uint8_t buf[10];
  return writeCompressedReloc(buf, rel, value);
}

// Relocations of type LEB and SLEB in the code section are padded to 5 bytes
// so that a fast linker can blindly overwrite them without needing to worry
// about the number of bytes needed to encode the values.
// However, for optimal output the code section can be compressed to remove
// the padding when outputting non-relocatable files.
// In this case we need to perform a size calculation based on the value at
// each relocation.  At best we end up saving 4 bytes for each relocation
// entry.
//
// This function only computes the final output size.  It must be called
// before getSize() is used to calculate the layout of the code section.
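// For example, a function index of 3 occupies 5 bytes in its padded form
// (0x83 0x80 0x80 0x80 0x00) but only a single byte (0x03) once the padding
// is removed.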
void InputFunction::calculateSize() {
  if (!file || !config->compressRelocations)
    return;

  LLVM_DEBUG(dbgs() << "calculateSize: " << getName() << "\n");

  const uint8_t *secStart = file->codeSection->Content.data();
  const uint8_t *funcStart = secStart + getInputSectionOffset();
  uint32_t functionSizeLength;
  decodeULEB128(funcStart, &functionSizeLength);

  uint32_t start = getInputSectionOffset();
  uint32_t end = start + function->Size;

  auto tombstone = getTombstone();

  uint32_t lastRelocEnd = start + functionSizeLength;
  for (const WasmRelocation &rel : relocations) {
    LLVM_DEBUG(dbgs() << "  region: " << (rel.Offset - lastRelocEnd) << "\n");
    compressedFuncSize += rel.Offset - lastRelocEnd;
    compressedFuncSize +=
        getRelocWidth(rel, file->calcNewValue(rel, tombstone, this));
    lastRelocEnd = rel.Offset + getRelocWidthPadded(rel);
  }
  LLVM_DEBUG(dbgs() << "  final region: " << (end - lastRelocEnd) << "\n");
  compressedFuncSize += end - lastRelocEnd;

  // Now that we know how long the resulting function is, we can add the
  // encoding of its length.
  uint8_t buf[5];
  compressedSize = compressedFuncSize + encodeULEB128(compressedFuncSize, buf);

  LLVM_DEBUG(dbgs() << "  calculateSize orig: " << function->Size << "\n");
  LLVM_DEBUG(dbgs() << "  calculateSize  new: " << compressedSize << "\n");
}

// Override the default writeTo method so that we can (optionally) write the
// compressed version of the function.
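// The bytes between relocation sites are copied verbatim, and each relocation
// value is re-encoded without padding, mirroring the layout computed by
// calculateSize() above.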
void InputFunction::writeTo(uint8_t *buf) const {
  if (!file || !config->compressRelocations)
    return InputChunk::writeTo(buf);

  buf += outSecOff;
  uint8_t *orig = buf;
  (void)orig;

  const uint8_t *secStart = file->codeSection->Content.data();
  const uint8_t *funcStart = secStart + getInputSectionOffset();
  const uint8_t *end = funcStart + function->Size;
  auto tombstone = getTombstone();
  uint32_t count;
  decodeULEB128(funcStart, &count);
  funcStart += count;

  LLVM_DEBUG(dbgs() << "write func: " << getName() << "\n");
  buf += encodeULEB128(compressedFuncSize, buf);
  const uint8_t *lastRelocEnd = funcStart;
  for (const WasmRelocation &rel : relocations) {
    unsigned chunkSize = (secStart + rel.Offset) - lastRelocEnd;
    LLVM_DEBUG(dbgs() << "  write chunk: " << chunkSize << "\n");
    memcpy(buf, lastRelocEnd, chunkSize);
    buf += chunkSize;
    buf += writeCompressedReloc(buf, rel,
                                file->calcNewValue(rel, tombstone, this));
    lastRelocEnd = secStart + rel.Offset + getRelocWidthPadded(rel);
  }

  unsigned chunkSize = end - lastRelocEnd;
  LLVM_DEBUG(dbgs() << "  write final chunk: " << chunkSize << "\n");
  memcpy(buf, lastRelocEnd, chunkSize);
  LLVM_DEBUG(dbgs() << "  total: " << (buf + chunkSize - orig) << "\n");
}

uint64_t InputSegment::getVA(uint64_t offset) const {
  return outputSeg->startVA + outputSegmentOffset + offset;
}

// Generate code to apply relocations to the data section at runtime.
// This is only called when generating shared libraries (PIC), where addresses
// are not known at static link time.
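// For each relocation we emit a sequence along the lines of:
//   global.get __memory_base   ;; compute the address to patch
//   i32.const <offset>         ;; i64 variants when targeting wasm64
//   i32.add
//   <push the relocated value: either a GOT entry, or a base global plus the
//    new value (__table_base for table-index relocations, __memory_base
//    otherwise)>
//   i32.store                  ;; i64.store for 64-bit relocation types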
void InputSegment::generateRelocationCode(raw_ostream &os) const {
  LLVM_DEBUG(dbgs() << "generating runtime relocations: " << getName()
                    << " count=" << relocations.size() << "\n");

  unsigned opcode_ptr_const = config->is64.getValueOr(false)
                                  ? WASM_OPCODE_I64_CONST
                                  : WASM_OPCODE_I32_CONST;
  unsigned opcode_ptr_add = config->is64.getValueOr(false)
                                ? WASM_OPCODE_I64_ADD
                                : WASM_OPCODE_I32_ADD;

  auto tombstone = getTombstone();
  // TODO(sbc): Encode the relocations in the data section and write a loop
  // here to apply them.
  for (const WasmRelocation &rel : relocations) {
    uint64_t offset = getVA(rel.Offset) - getInputSectionOffset();

    LLVM_DEBUG(dbgs() << "gen reloc: type=" << relocTypeToString(rel.Type)
                      << " addend=" << rel.Addend << " index=" << rel.Index
                      << " output offset=" << offset << "\n");

    // Get __memory_base
    writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
    writeUleb128(os, WasmSym::memoryBase->getGlobalIndex(), "memory_base");

    // Add the offset of the relocation
    writeU8(os, opcode_ptr_const, "CONST");
    writeSleb128(os, offset, "offset");
    writeU8(os, opcode_ptr_add, "ADD");

    bool is64 = relocIs64(rel.Type);
    unsigned opcode_reloc_const =
        is64 ? WASM_OPCODE_I64_CONST : WASM_OPCODE_I32_CONST;
    unsigned opcode_reloc_add =
        is64 ? WASM_OPCODE_I64_ADD : WASM_OPCODE_I32_ADD;
    unsigned opcode_reloc_store =
        is64 ? WASM_OPCODE_I64_STORE : WASM_OPCODE_I32_STORE;

    Symbol *sym = file->getSymbol(rel);
    // Now figure out what we want to store
    if (sym->hasGOTIndex()) {
      writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
      writeUleb128(os, sym->getGOTIndex(), "global index");
      if (rel.Addend) {
        writeU8(os, opcode_reloc_const, "CONST");
        writeSleb128(os, rel.Addend, "addend");
        writeU8(os, opcode_reloc_add, "ADD");
      }
    } else {
      const GlobalSymbol *baseSymbol = WasmSym::memoryBase;
      if (rel.Type == R_WASM_TABLE_INDEX_I32 ||
          rel.Type == R_WASM_TABLE_INDEX_I64)
        baseSymbol = WasmSym::tableBase;
      writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
      writeUleb128(os, baseSymbol->getGlobalIndex(), "base");
      writeU8(os, opcode_reloc_const, "CONST");
      writeSleb128(os, file->calcNewValue(rel, tombstone, this), "offset");
      writeU8(os, opcode_reloc_add, "ADD");
    }

    // Store that value at the virtual address
    writeU8(os, opcode_reloc_store, "I32_STORE");
    writeUleb128(os, 2, "align");
    writeUleb128(os, 0, "offset");
  }
}

uint64_t InputSection::getTombstoneForSection(StringRef name) {
  // When a function is not live we need to update relocations referring to it.
  // If they occur in DWARF debug symbols, we want to change the pc of the
  // function to -1 to avoid overlapping with a valid range. However for the
  // debug_ranges and debug_loc sections that would conflict with the existing
  // meaning of -1 so we use -2.
  // Returning 0 means there is no tombstone value for this section, and the
  // relocation will just use the addend.
  if (!name.startswith(".debug_"))
    return 0;
  if (name.equals(".debug_ranges") || name.equals(".debug_loc"))
    return UINT64_C(-2);
  return UINT64_C(-1);
}

} // namespace wasm
} // namespace lld