1 //===- InputChunks.cpp ----------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "InputChunks.h"
10 #include "Config.h"
11 #include "OutputSegment.h"
12 #include "WriterUtils.h"
13 #include "lld/Common/ErrorHandler.h"
14 #include "lld/Common/LLVM.h"
15 #include "llvm/Support/LEB128.h"
16 #include "llvm/Support/xxhash.h"
17 
18 #define DEBUG_TYPE "lld"
19 
20 using namespace llvm;
21 using namespace llvm::wasm;
22 using namespace llvm::support::endian;
23 
24 namespace lld {
// Returns the symbolic name (e.g. "R_WASM_FUNCTION_INDEX_LEB") for a wasm
// relocation type, for use in diagnostics and debug output.  The cases are
// generated by expanding the master list in WasmRelocs.def.
StringRef relocTypeToString(uint8_t relocType) {
  switch (relocType) {
#define WASM_RELOC(NAME, REL)                                                  \
  case REL:                                                                    \
    return #NAME;
#include "llvm/BinaryFormat/WasmRelocs.def"
#undef WASM_RELOC
  }
  llvm_unreachable("unknown reloc type");
}
35 
36 bool relocIs64(uint8_t relocType) {
37   switch (relocType) {
38   case R_WASM_MEMORY_ADDR_LEB64:
39   case R_WASM_MEMORY_ADDR_SLEB64:
40   case R_WASM_MEMORY_ADDR_REL_SLEB64:
41   case R_WASM_MEMORY_ADDR_I64:
42     return true;
43   default:
44     return false;
45   }
46 }
47 
48 std::string toString(const wasm::InputChunk *c) {
49   return (toString(c->file) + ":(" + c->getName() + ")").str();
50 }
51 
52 namespace wasm {
53 StringRef InputChunk::getComdatName() const {
54   uint32_t index = getComdat();
55   if (index == UINT32_MAX)
56     return StringRef();
57   return file->getWasmObj()->linkingData().Comdats[index];
58 }
59 
// Debug-only sanity check: verify that the value currently encoded at each
// relocation site matches what the linker expects it to hold (as computed
// by calcExpectedValue).  Mismatches produce warnings, not errors.
void InputChunk::verifyRelocTargets() const {
  for (const WasmRelocation &rel : relocations) {
    uint64_t existingValue;
    unsigned bytesRead = 0;
    // LEB-encoded relocation sites are padded to a fixed width so they can
    // be patched in place: 5 bytes for 32-bit values, 10 for 64-bit.
    unsigned paddedLEBWidth = 5;
    // rel.Offset is relative to the start of the containing input section.
    auto offset = rel.Offset - getInputSectionOffset();
    const uint8_t *loc = data().data() + offset;
    switch (rel.Type) {
    // 32-bit unsigned LEB sites.
    case R_WASM_TYPE_INDEX_LEB:
    case R_WASM_FUNCTION_INDEX_LEB:
    case R_WASM_GLOBAL_INDEX_LEB:
    case R_WASM_EVENT_INDEX_LEB:
    case R_WASM_MEMORY_ADDR_LEB:
    case R_WASM_TABLE_NUMBER_LEB:
      existingValue = decodeULEB128(loc, &bytesRead);
      break;
    // 64-bit unsigned LEB site.
    case R_WASM_MEMORY_ADDR_LEB64:
      existingValue = decodeULEB128(loc, &bytesRead);
      paddedLEBWidth = 10;
      break;
    // 32-bit signed LEB sites.
    case R_WASM_TABLE_INDEX_SLEB:
    case R_WASM_TABLE_INDEX_REL_SLEB:
    case R_WASM_MEMORY_ADDR_SLEB:
    case R_WASM_MEMORY_ADDR_REL_SLEB:
    case R_WASM_MEMORY_ADDR_TLS_SLEB:
      existingValue = static_cast<uint64_t>(decodeSLEB128(loc, &bytesRead));
      break;
    // 64-bit signed LEB sites.
    case R_WASM_TABLE_INDEX_SLEB64:
    case R_WASM_MEMORY_ADDR_SLEB64:
    case R_WASM_MEMORY_ADDR_REL_SLEB64:
      existingValue = static_cast<uint64_t>(decodeSLEB128(loc, &bytesRead));
      paddedLEBWidth = 10;
      break;
    // Fixed-width little-endian 32-bit sites (bytesRead stays 0, so the
    // padded-width check below is skipped for these).
    case R_WASM_TABLE_INDEX_I32:
    case R_WASM_MEMORY_ADDR_I32:
    case R_WASM_FUNCTION_OFFSET_I32:
    case R_WASM_SECTION_OFFSET_I32:
    case R_WASM_GLOBAL_INDEX_I32:
    case R_WASM_MEMORY_ADDR_LOCREL_I32:
      existingValue = read32le(loc);
      break;
    // Fixed-width little-endian 64-bit sites.
    case R_WASM_TABLE_INDEX_I64:
    case R_WASM_MEMORY_ADDR_I64:
    case R_WASM_FUNCTION_OFFSET_I64:
      existingValue = read64le(loc);
      break;
    default:
      llvm_unreachable("unknown relocation type");
    }

    // A LEB decoded with a different width than the padded width means the
    // producer did not pad the relocation site as expected.
    if (bytesRead && bytesRead != paddedLEBWidth)
      warn("expected LEB at relocation site be 5/10-byte padded");

    // NOTE(review): global-index relocations are deliberately excluded from
    // the expected-value comparison — confirm the reason with the authors of
    // calcExpectedValue.
    if (rel.Type != R_WASM_GLOBAL_INDEX_LEB &&
        rel.Type != R_WASM_GLOBAL_INDEX_I32) {
      auto expectedValue = file->calcExpectedValue(rel);
      if (expectedValue != existingValue)
        warn(toString(this) + ": unexpected existing value for " +
             relocTypeToString(rel.Type) + ": existing=" +
             Twine(existingValue) + " expected=" + Twine(expectedValue));
    }
  }
}
123 
124 // Copy this input chunk to an mmap'ed output file and apply relocations.
125 void InputChunk::writeTo(uint8_t *buf) const {
126   // Copy contents
127   memcpy(buf + outSecOff, data().data(), data().size());
128 
129   // Apply relocations
130   relocate(buf + outSecOff);
131 }
132 
// Apply all of this chunk's relocations to `buf`, which points at the start
// of the chunk's data in the output (the caller has already added outSecOff).
void InputChunk::relocate(uint8_t *buf) const {
  if (relocations.empty())
    return;

#ifndef NDEBUG
  verifyRelocTargets();
#endif

  LLVM_DEBUG(dbgs() << "applying relocations: " << toString(this)
                    << " count=" << relocations.size() << "\n");
  int32_t inputSectionOffset = getInputSectionOffset();
  // Value used for relocations whose target has been discarded (relevant to
  // DWARF sections; see getTombstoneForSection).
  auto tombstone = getTombstone();

  for (const WasmRelocation &rel : relocations) {
    // rel.Offset is relative to the input section, so rebase it onto buf.
    uint8_t *loc = buf + rel.Offset - inputSectionOffset;
    auto value = file->calcNewValue(rel, tombstone, this);
    LLVM_DEBUG(dbgs() << "apply reloc: type=" << relocTypeToString(rel.Type));
    // TYPE_INDEX relocations don't reference a symbol, so there is no name
    // to print for them.
    if (rel.Type != R_WASM_TYPE_INDEX_LEB)
      LLVM_DEBUG(dbgs() << " sym=" << file->getSymbols()[rel.Index]->getName());
    LLVM_DEBUG(dbgs() << " addend=" << rel.Addend << " index=" << rel.Index
                      << " value=" << value << " offset=" << rel.Offset
                      << "\n");

    // LEB/SLEB sites are written padded (5 bytes for 32-bit, 10 for 64-bit)
    // so any new value fits without moving surrounding bytes; I32/I64 sites
    // are fixed-width little-endian words.
    switch (rel.Type) {
    case R_WASM_TYPE_INDEX_LEB:
    case R_WASM_FUNCTION_INDEX_LEB:
    case R_WASM_GLOBAL_INDEX_LEB:
    case R_WASM_EVENT_INDEX_LEB:
    case R_WASM_MEMORY_ADDR_LEB:
    case R_WASM_TABLE_NUMBER_LEB:
      encodeULEB128(value, loc, 5);
      break;
    case R_WASM_MEMORY_ADDR_LEB64:
      encodeULEB128(value, loc, 10);
      break;
    case R_WASM_TABLE_INDEX_SLEB:
    case R_WASM_TABLE_INDEX_REL_SLEB:
    case R_WASM_MEMORY_ADDR_SLEB:
    case R_WASM_MEMORY_ADDR_REL_SLEB:
    case R_WASM_MEMORY_ADDR_TLS_SLEB:
      encodeSLEB128(static_cast<int32_t>(value), loc, 5);
      break;
    case R_WASM_TABLE_INDEX_SLEB64:
    case R_WASM_MEMORY_ADDR_SLEB64:
    case R_WASM_MEMORY_ADDR_REL_SLEB64:
      encodeSLEB128(static_cast<int64_t>(value), loc, 10);
      break;
    case R_WASM_TABLE_INDEX_I32:
    case R_WASM_MEMORY_ADDR_I32:
    case R_WASM_FUNCTION_OFFSET_I32:
    case R_WASM_SECTION_OFFSET_I32:
    case R_WASM_GLOBAL_INDEX_I32:
    case R_WASM_MEMORY_ADDR_LOCREL_I32:
      write32le(loc, value);
      break;
    case R_WASM_TABLE_INDEX_I64:
    case R_WASM_MEMORY_ADDR_I64:
    case R_WASM_FUNCTION_OFFSET_I64:
      write64le(loc, value);
      break;
    default:
      llvm_unreachable("unknown relocation type");
    }
  }
}
198 
199 // Copy relocation entries to a given output stream.
200 // This function is used only when a user passes "-r". For a regular link,
201 // we consume relocations instead of copying them to an output file.
202 void InputChunk::writeRelocations(raw_ostream &os) const {
203   if (relocations.empty())
204     return;
205 
206   int32_t off = outSecOff - getInputSectionOffset();
207   LLVM_DEBUG(dbgs() << "writeRelocations: " << file->getName()
208                     << " offset=" << Twine(off) << "\n");
209 
210   for (const WasmRelocation &rel : relocations) {
211     writeUleb128(os, rel.Type, "reloc type");
212     writeUleb128(os, rel.Offset + off, "reloc offset");
213     writeUleb128(os, file->calcNewIndex(rel), "reloc index");
214 
215     if (relocTypeHasAddend(rel.Type))
216       writeSleb128(os, file->calcNewAddend(rel), "reloc addend");
217   }
218 }
219 
// Record the final index of this function in the output function index
// space.  May only be assigned once.
void InputFunction::setFunctionIndex(uint32_t index) {
  LLVM_DEBUG(dbgs() << "InputFunction::setFunctionIndex: " << getName()
                    << " -> " << index << "\n");
  assert(!hasFunctionIndex());
  functionIndex = index;
}
226 
// Record this function's slot in the indirect function table.  May only be
// assigned once.
void InputFunction::setTableIndex(uint32_t index) {
  LLVM_DEBUG(dbgs() << "InputFunction::setTableIndex: " << getName() << " -> "
                    << index << "\n");
  assert(!hasTableIndex());
  tableIndex = index;
}
233 
// Write a relocation value without padding and return the number of bytes
// written.
static unsigned writeCompressedReloc(uint8_t *buf, const WasmRelocation &rel,
                                     uint64_t value) {
  switch (rel.Type) {
  // Unsigned LEB sites: encodeULEB128 with no pad argument writes the
  // minimal encoding and returns its length.
  case R_WASM_TYPE_INDEX_LEB:
  case R_WASM_FUNCTION_INDEX_LEB:
  case R_WASM_GLOBAL_INDEX_LEB:
  case R_WASM_EVENT_INDEX_LEB:
  case R_WASM_MEMORY_ADDR_LEB:
  case R_WASM_MEMORY_ADDR_LEB64:
  case R_WASM_TABLE_NUMBER_LEB:
    return encodeULEB128(value, buf);
  // Signed LEB sites.
  case R_WASM_TABLE_INDEX_SLEB:
  case R_WASM_TABLE_INDEX_SLEB64:
  case R_WASM_MEMORY_ADDR_SLEB:
  case R_WASM_MEMORY_ADDR_SLEB64:
    return encodeSLEB128(static_cast<int64_t>(value), buf);
  default:
    // Fixed-width (I32/I64) relocations are never compressed, so they are
    // not expected here.
    llvm_unreachable("unexpected relocation type");
  }
}
256 
// Returns the fixed (padded) width a relocation site occupies in the input:
// 5 bytes for 32-bit LEB/SLEB sites, 10 for 64-bit ones.
static unsigned getRelocWidthPadded(const WasmRelocation &rel) {
  switch (rel.Type) {
  case R_WASM_TYPE_INDEX_LEB:
  case R_WASM_FUNCTION_INDEX_LEB:
  case R_WASM_GLOBAL_INDEX_LEB:
  case R_WASM_EVENT_INDEX_LEB:
  case R_WASM_MEMORY_ADDR_LEB:
  case R_WASM_TABLE_NUMBER_LEB:
  case R_WASM_TABLE_INDEX_SLEB:
  case R_WASM_MEMORY_ADDR_SLEB:
    return 5;
  case R_WASM_TABLE_INDEX_SLEB64:
  case R_WASM_MEMORY_ADDR_LEB64:
  case R_WASM_MEMORY_ADDR_SLEB64:
    return 10;
  default:
    // Fixed-width (I32/I64) relocations are never compressed, so they are
    // not expected here.
    llvm_unreachable("unexpected relocation type");
  }
}
276 
277 static unsigned getRelocWidth(const WasmRelocation &rel, uint64_t value) {
278   uint8_t buf[10];
279   return writeCompressedReloc(buf, rel, value);
280 }
281 
// Relocations of type LEB and SLEB in the code section are padded to 5 bytes
// so that a fast linker can blindly overwrite them without needing to worry
// about the number of bytes needed to encode the values.
// However, for optimal output the code section can be compressed to remove
// the padding when outputting non-relocatable files.
// In this case we need to perform a size calculation based on the value at
// each relocation.  At best we end up saving 4 bytes for each relocation
// entry.
//
// This function only computes the final output size.  It must be called
// before getSize() is used to calculate the layout of the code section.
void InputFunction::calculateSize() {
  // Functions with no backing file (synthetics) and relocatable output keep
  // their original padded size; only compressed output needs this pass.
  if (!file || !config->compressRelocations)
    return;

  LLVM_DEBUG(dbgs() << "calculateSize: " << getName() << "\n");

  // A function body in the code section begins with its size as a ULEB128;
  // functionSizeLength receives the byte length of that size field.
  const uint8_t *secStart = file->codeSection->Content.data();
  const uint8_t *funcStart = secStart + getInputSectionOffset();
  uint32_t functionSizeLength;
  decodeULEB128(funcStart, &functionSizeLength);

  uint32_t start = getInputSectionOffset();
  uint32_t end = start + function->Size;

  auto tombstone = getTombstone();

  // Walk the relocations in order, summing the bytes between relocation
  // sites (copied verbatim at write time) plus the compressed width of the
  // new value at each site.
  uint32_t lastRelocEnd = start + functionSizeLength;
  for (const WasmRelocation &rel : relocations) {
    LLVM_DEBUG(dbgs() << "  region: " << (rel.Offset - lastRelocEnd) << "\n");
    compressedFuncSize += rel.Offset - lastRelocEnd;
    compressedFuncSize +=
        getRelocWidth(rel, file->calcNewValue(rel, tombstone, this));
    lastRelocEnd = rel.Offset + getRelocWidthPadded(rel);
  }
  LLVM_DEBUG(dbgs() << "  final region: " << (end - lastRelocEnd) << "\n");
  compressedFuncSize += end - lastRelocEnd;

  // Now we know how long the resulting function is we can add the encoding
  // of its length
  uint8_t buf[5];
  compressedSize = compressedFuncSize + encodeULEB128(compressedFuncSize, buf);

  LLVM_DEBUG(dbgs() << "  calculateSize orig: " << function->Size << "\n");
  LLVM_DEBUG(dbgs() << "  calculateSize  new: " << compressedSize << "\n");
}
327 
328 // Override the default writeTo method so that we can (optionally) write the
329 // compressed version of the function.
void InputFunction::writeTo(uint8_t *buf) const {
  // Without compression the base class does a plain copy + relocate.
  if (!file || !config->compressRelocations)
    return InputChunk::writeTo(buf);

  buf += outSecOff;
  uint8_t *orig = buf;
  (void)orig; // only referenced inside the final LLVM_DEBUG below

  const uint8_t *secStart = file->codeSection->Content.data();
  const uint8_t *funcStart = secStart + getInputSectionOffset();
  const uint8_t *end = funcStart + function->Size;
  auto tombstone = getTombstone();
  // Skip the original (padded) function-size field; `count` receives the
  // number of bytes that ULEB128 field occupied.
  uint32_t count;
  decodeULEB128(funcStart, &count);
  funcStart += count;

  LLVM_DEBUG(dbgs() << "write func: " << getName() << "\n");
  // Emit the new size computed by calculateSize(), then alternate between
  // copying the bytes separating relocation sites verbatim and re-encoding
  // each relocation value without padding.
  buf += encodeULEB128(compressedFuncSize, buf);
  const uint8_t *lastRelocEnd = funcStart;
  for (const WasmRelocation &rel : relocations) {
    unsigned chunkSize = (secStart + rel.Offset) - lastRelocEnd;
    LLVM_DEBUG(dbgs() << "  write chunk: " << chunkSize << "\n");
    memcpy(buf, lastRelocEnd, chunkSize);
    buf += chunkSize;
    buf += writeCompressedReloc(buf, rel,
                                file->calcNewValue(rel, tombstone, this));
    lastRelocEnd = secStart + rel.Offset + getRelocWidthPadded(rel);
  }

  // Copy whatever follows the last relocation site.
  unsigned chunkSize = end - lastRelocEnd;
  LLVM_DEBUG(dbgs() << "  write final chunk: " << chunkSize << "\n");
  memcpy(buf, lastRelocEnd, chunkSize);
  LLVM_DEBUG(dbgs() << "  total: " << (buf + chunkSize - orig) << "\n");
}
364 
365 uint64_t InputSegment::getOffset(uint64_t offset) const {
366   if (const MergeInputSegment *ms = dyn_cast<MergeInputSegment>(this)) {
367     LLVM_DEBUG(dbgs() << "getOffset(merged): " << getName() << "\n");
368     LLVM_DEBUG(dbgs() << "offset: " << offset << "\n");
369     LLVM_DEBUG(dbgs() << "parentOffset: " << ms->getParentOffset(offset)
370                       << "\n");
371     assert(ms->parent);
372     return ms->parent->getOffset(ms->getParentOffset(offset));
373   }
374   return outputSegmentOffset + offset;
375 }
376 
377 uint64_t InputSegment::getVA(uint64_t offset) const {
378   return (outputSeg ? outputSeg->startVA : 0) + getOffset(offset);
379 }
380 
// Generate code to apply relocations to the data section at runtime.
// This is only called when generating shared libraries (PIC), where addresses
// are not known at static link time.
void InputSegment::generateRelocationCode(raw_ostream &os) const {
  LLVM_DEBUG(dbgs() << "generating runtime relocations: " << getName()
                    << " count=" << relocations.size() << "\n");

  // Pointer arithmetic uses i64 opcodes under wasm64, i32 otherwise.
  unsigned opcode_ptr_const = config->is64.getValueOr(false)
                                  ? WASM_OPCODE_I64_CONST
                                  : WASM_OPCODE_I32_CONST;
  unsigned opcode_ptr_add = config->is64.getValueOr(false)
                                ? WASM_OPCODE_I64_ADD
                                : WASM_OPCODE_I32_ADD;

  auto tombstone = getTombstone();
  // TODO(sbc): Encode the relocations in the data section and write a loop
  // here to apply them.
  for (const WasmRelocation &rel : relocations) {
    // Address of the relocation site, relative to __memory_base.
    uint64_t offset = getVA(rel.Offset) - getInputSectionOffset();

    LLVM_DEBUG(dbgs() << "gen reloc: type=" << relocTypeToString(rel.Type)
                      << " addend=" << rel.Addend << " index=" << rel.Index
                      << " output offset=" << offset << "\n");

    // Get __memory_base
    writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
    writeUleb128(os, WasmSym::memoryBase->getGlobalIndex(), "memory_base");

    // Add the offset of the relocation
    writeU8(os, opcode_ptr_const, "CONST");
    writeSleb128(os, offset, "offset");
    writeU8(os, opcode_ptr_add, "ADD");

    // The width of the stored value follows the relocation type, which is
    // independent of the pointer width chosen above.
    bool is64 = relocIs64(rel.Type);
    unsigned opcode_reloc_const =
        is64 ? WASM_OPCODE_I64_CONST : WASM_OPCODE_I32_CONST;
    unsigned opcode_reloc_add =
        is64 ? WASM_OPCODE_I64_ADD : WASM_OPCODE_I32_ADD;
    unsigned opcode_reloc_store =
        is64 ? WASM_OPCODE_I64_STORE : WASM_OPCODE_I32_STORE;

    Symbol *sym = file->getSymbol(rel);
    // Now figure out what we want to store
    if (sym->hasGOTIndex()) {
      // GOT-addressed symbols: read the address from the GOT entry (a
      // global), then add the relocation addend if there is one.
      writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
      writeUleb128(os, sym->getGOTIndex(), "global index");
      if (rel.Addend) {
        writeU8(os, opcode_reloc_const, "CONST");
        writeSleb128(os, rel.Addend, "addend");
        writeU8(os, opcode_reloc_add, "ADD");
      }
    } else {
      // Locally-resolved symbols: base global (__table_base for table-index
      // relocations, __memory_base otherwise) plus the statically-computed
      // relocation value.
      const GlobalSymbol* baseSymbol = WasmSym::memoryBase;
      if (rel.Type == R_WASM_TABLE_INDEX_I32 ||
          rel.Type == R_WASM_TABLE_INDEX_I64)
        baseSymbol = WasmSym::tableBase;
      writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
      writeUleb128(os, baseSymbol->getGlobalIndex(), "base");
      writeU8(os, opcode_reloc_const, "CONST");
      writeSleb128(os, file->calcNewValue(rel, tombstone, this), "offset");
      writeU8(os, opcode_reloc_add, "ADD");
    }

    // Store that value at the virtual address
    writeU8(os, opcode_reloc_store, "I32_STORE");
    writeUleb128(os, 2, "align");
    writeUleb128(os, 0, "offset");
  }
}
450 
451 // Split WASM_SEG_FLAG_STRINGS section. Such a section is a sequence of
452 // null-terminated strings.
453 void MergeInputSegment::splitStrings(ArrayRef<uint8_t> data) {
454   LLVM_DEBUG(llvm::dbgs() << "splitStrings\n");
455   size_t off = 0;
456   StringRef s = toStringRef(data);
457 
458   while (!s.empty()) {
459     size_t end = s.find(0);
460     if (end == StringRef::npos)
461       fatal(toString(this) + ": string is not null terminated");
462     size_t size = end + 1;
463 
464     pieces.emplace_back(off, xxHash64(s.substr(0, size)), true);
465     s = s.substr(size);
466     off += size;
467   }
468 }
469 
// This function is called after we obtain a complete list of input sections
// that need to be linked. It is responsible for splitting the section
// contents into small chunks for further processing.
//
// Note that this function is called from parallelForEach. This must be
// thread-safe (i.e. no memory allocation from the pools).
476 void MergeInputSegment::splitIntoPieces() {
477   assert(pieces.empty());
478   // As of now we only support WASM_SEG_FLAG_STRINGS but in the future we
479   // could add other types of splitting (see ELF's splitIntoPieces).
480   assert(segment->Data.LinkingFlags & WASM_SEG_FLAG_STRINGS);
481   splitStrings(data());
482 }
483 
484 SegmentPiece *MergeInputSegment::getSegmentPiece(uint64_t offset) {
485   if (this->data().size() <= offset)
486     fatal(toString(this) + ": offset is outside the section");
487 
488   // If Offset is not at beginning of a section piece, it is not in the map.
489   // In that case we need to  do a binary search of the original section piece
490   // vector.
491   auto it = partition_point(
492       pieces, [=](SegmentPiece p) { return p.inputOff <= offset; });
493   return &it[-1];
494 }
495 
496 // Returns the offset in an output section for a given input offset.
497 // Because contents of a mergeable section is not contiguous in output,
498 // it is not just an addition to a base output offset.
499 uint64_t MergeInputSegment::getParentOffset(uint64_t offset) const {
500   // If Offset is not at beginning of a section piece, it is not in the map.
501   // In that case we need to search from the original section piece vector.
502   const SegmentPiece *piece = getSegmentPiece(offset);
503   uint64_t addend = offset - piece->inputOff;
504   return piece->outputOff + addend;
505 }
506 
// Size of the merged segment: whatever the string-table builder computed
// after deduplication/merging (valid once finalizeContents() has run).
uint32_t SyntheticMergedDataSegment::getSize() const {
  return builder.getSize();
}
510 
511 void SyntheticMergedDataSegment::writeTo(uint8_t *buf) const {
512   builder.write(buf + outSecOff);
513 
514   // Apply relocations
515   relocate(buf + outSecOff);
516 }
517 
518 void SyntheticMergedDataSegment::finalizeContents() {
519   // Add all string pieces to the string table builder to create section
520   // contents.
521   for (MergeInputSegment *sec : segments)
522     for (size_t i = 0, e = sec->pieces.size(); i != e; ++i)
523       if (sec->pieces[i].live)
524         builder.add(sec->getData(i));
525 
526   // Fix the string table content. After this, the contents will never change.
527   builder.finalize();
528 
529   // finalize() fixed tail-optimized strings, so we can now get
530   // offsets of strings. Get an offset for each string and save it
531   // to a corresponding SectionPiece for easy access.
532   for (MergeInputSegment *sec : segments)
533     for (size_t i = 0, e = sec->pieces.size(); i != e; ++i)
534       if (sec->pieces[i].live)
535         sec->pieces[i].outputOff = builder.getOffset(sec->getData(i));
536 }
537 
538 uint64_t InputSection::getTombstoneForSection(StringRef name) {
539   // When a function is not live we need to update relocations referring to it.
540   // If they occur in DWARF debug symbols, we want to change the pc of the
541   // function to -1 to avoid overlapping with a valid range. However for the
542   // debug_ranges and debug_loc sections that would conflict with the existing
543   // meaning of -1 so we use -2.
544   // Returning 0 means there is no tombstone value for this section, and relocation
545   // will just use the addend.
546   if (!name.startswith(".debug_"))
547     return 0;
548   if (name.equals(".debug_ranges") || name.equals(".debug_loc"))
549     return UINT64_C(-2);
550   return UINT64_C(-1);
551 }
552 
553 } // namespace wasm
554 } // namespace lld
555