1 //===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains a printer that converts from our internal representation
10 // of machine-dependent LLVM code to the AArch64 assembly language.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "AArch64.h"
15 #include "AArch64MCInstLower.h"
16 #include "AArch64MachineFunctionInfo.h"
17 #include "AArch64RegisterInfo.h"
18 #include "AArch64Subtarget.h"
19 #include "AArch64TargetObjectFile.h"
20 #include "MCTargetDesc/AArch64AddressingModes.h"
21 #include "MCTargetDesc/AArch64InstPrinter.h"
22 #include "MCTargetDesc/AArch64MCExpr.h"
23 #include "MCTargetDesc/AArch64MCTargetDesc.h"
24 #include "MCTargetDesc/AArch64TargetStreamer.h"
25 #include "TargetInfo/AArch64TargetInfo.h"
26 #include "Utils/AArch64BaseInfo.h"
27 #include "llvm/ADT/SmallString.h"
28 #include "llvm/ADT/SmallVector.h"
29 #include "llvm/ADT/StringRef.h"
30 #include "llvm/ADT/Triple.h"
31 #include "llvm/ADT/Twine.h"
32 #include "llvm/BinaryFormat/COFF.h"
33 #include "llvm/BinaryFormat/ELF.h"
34 #include "llvm/CodeGen/AsmPrinter.h"
35 #include "llvm/CodeGen/FaultMaps.h"
36 #include "llvm/CodeGen/MachineBasicBlock.h"
37 #include "llvm/CodeGen/MachineFunction.h"
38 #include "llvm/CodeGen/MachineInstr.h"
39 #include "llvm/CodeGen/MachineJumpTableInfo.h"
40 #include "llvm/CodeGen/MachineModuleInfoImpls.h"
41 #include "llvm/CodeGen/MachineOperand.h"
42 #include "llvm/CodeGen/StackMaps.h"
43 #include "llvm/CodeGen/TargetRegisterInfo.h"
44 #include "llvm/IR/DataLayout.h"
45 #include "llvm/IR/DebugInfoMetadata.h"
46 #include "llvm/MC/MCAsmInfo.h"
47 #include "llvm/MC/MCContext.h"
48 #include "llvm/MC/MCInst.h"
49 #include "llvm/MC/MCInstBuilder.h"
50 #include "llvm/MC/MCSectionELF.h"
51 #include "llvm/MC/MCStreamer.h"
52 #include "llvm/MC/MCSymbol.h"
53 #include "llvm/MC/TargetRegistry.h"
54 #include "llvm/Support/Casting.h"
55 #include "llvm/Support/ErrorHandling.h"
56 #include "llvm/Support/raw_ostream.h"
57 #include "llvm/Target/TargetMachine.h"
58 #include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
59 #include <algorithm>
60 #include <cassert>
61 #include <cstdint>
62 #include <map>
63 #include <memory>
64
65 using namespace llvm;
66
67 #define DEBUG_TYPE "asm-printer"
68
69 namespace {
70
71 class AArch64AsmPrinter : public AsmPrinter {
72 AArch64MCInstLower MCInstLowering;
73 StackMaps SM;
74 FaultMaps FM;
75 const AArch64Subtarget *STI;
76 bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;
77
78 public:
79   AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
80 : AsmPrinter(TM, std::move(Streamer)), MCInstLowering(OutContext, *this),
81 SM(*this), FM(*this) {}
82
83   StringRef getPassName() const override { return "AArch64 Assembly Printer"; }
84
85 /// Wrapper for MCInstLowering.lowerOperand() for the
86 /// tblgen'erated pseudo lowering.
87   bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
88 return MCInstLowering.lowerOperand(MO, MCOp);
89 }
90
91 void emitStartOfAsmFile(Module &M) override;
92 void emitJumpTableInfo() override;
93
94 void emitFunctionEntryLabel() override;
95
96 void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);
97
98 void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);
99
100 void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
101 const MachineInstr &MI);
102 void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
103 const MachineInstr &MI);
104 void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
105 const MachineInstr &MI);
106 void LowerFAULTING_OP(const MachineInstr &MI);
107
108 void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
109 void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
110 void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
111
112 typedef std::tuple<unsigned, bool, uint32_t> HwasanMemaccessTuple;
113 std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
114 void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
115 void emitHwasanMemaccessSymbols(Module &M);
116
117 void emitSled(const MachineInstr &MI, SledKind Kind);
118
119 /// tblgen'erated driver function for lowering simple MI->MC
120 /// pseudo instructions.
121 bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
122 const MachineInstr *MI);
123
124 void emitInstruction(const MachineInstr *MI) override;
125
126 void emitFunctionHeaderComment() override;
127
128   void getAnalysisUsage(AnalysisUsage &AU) const override {
129 AsmPrinter::getAnalysisUsage(AU);
130 AU.setPreservesAll();
131 }
132
133   bool runOnMachineFunction(MachineFunction &MF) override {
134 AArch64FI = MF.getInfo<AArch64FunctionInfo>();
135 STI = &MF.getSubtarget<AArch64Subtarget>();
136
137 SetupMachineFunction(MF);
138
139 if (STI->isTargetCOFF()) {
140 bool Internal = MF.getFunction().hasInternalLinkage();
141 COFF::SymbolStorageClass Scl = Internal ? COFF::IMAGE_SYM_CLASS_STATIC
142 : COFF::IMAGE_SYM_CLASS_EXTERNAL;
143 int Type =
144 COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT;
145
146 OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
147 OutStreamer->emitCOFFSymbolStorageClass(Scl);
148 OutStreamer->emitCOFFSymbolType(Type);
149 OutStreamer->endCOFFSymbolDef();
150 }
151
152 // Emit the rest of the function body.
153 emitFunctionBody();
154
155 // Emit the XRay table for this function.
156 emitXRayTable();
157
158 // We didn't modify anything.
159 return false;
160 }
161
162 private:
163 void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
164 bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
165 bool printAsmRegInClass(const MachineOperand &MO,
166 const TargetRegisterClass *RC, unsigned AltName,
167 raw_ostream &O);
168
169 bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
170 const char *ExtraCode, raw_ostream &O) override;
171 bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
172 const char *ExtraCode, raw_ostream &O) override;
173
174 void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
175
176 void emitFunctionBodyEnd() override;
177
178 MCSymbol *GetCPISymbol(unsigned CPID) const override;
179 void emitEndOfAsmFile(Module &M) override;
180
181 AArch64FunctionInfo *AArch64FI = nullptr;
182
183 /// Emit the LOHs contained in AArch64FI.
184 void emitLOHs();
185
186 /// Emit instruction to set float register to zero.
187 void emitFMov0(const MachineInstr &MI);
188
189 using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;
190
191 MInstToMCSymbol LOHInstToLabel;
192
193   bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
194 return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
195 }
196 };
197
198 } // end anonymous namespace
199
200 void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
201 const Triple &TT = TM.getTargetTriple();
202
203 if (TT.isOSBinFormatCOFF()) {
204 // Emit an absolute @feat.00 symbol. This appears to be some kind of
205 // compiler features bitfield read by link.exe.
206 MCSymbol *S = MMI->getContext().getOrCreateSymbol(StringRef("@feat.00"));
207 OutStreamer->beginCOFFSymbolDef(S);
208 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_STATIC);
209 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_NULL);
210 OutStreamer->endCOFFSymbolDef();
211 int64_t Feat00Flags = 0;
212
213 if (M.getModuleFlag("cfguard")) {
214 Feat00Flags |= 0x800; // Object is CFG-aware.
215 }
216
217 if (M.getModuleFlag("ehcontguard")) {
218 Feat00Flags |= 0x4000; // Object also has EHCont.
219 }
220
221 OutStreamer->emitSymbolAttribute(S, MCSA_Global);
222 OutStreamer->emitAssignment(
223 S, MCConstantExpr::create(Feat00Flags, MMI->getContext()));
224 }
225
226 if (!TT.isOSBinFormatELF())
227 return;
228
229 // Assemble feature flags that may require creation of a note section.
230 unsigned Flags = 0;
231 if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
232 M.getModuleFlag("branch-target-enforcement")))
233 if (BTE->getZExtValue())
234 Flags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
235
236 if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
237 M.getModuleFlag("sign-return-address")))
238 if (Sign->getZExtValue())
239 Flags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_PAC;
240
241 if (Flags == 0)
242 return;
243
244 // Emit a .note.gnu.property section with the flags.
245 if (auto *TS = static_cast<AArch64TargetStreamer *>(
246 OutStreamer->getTargetStreamer()))
247 TS->emitNoteSection(Flags);
248 }
249
250 void AArch64AsmPrinter::emitFunctionHeaderComment() {
251 const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
252 Optional<std::string> OutlinerString = FI->getOutliningStyle();
253 if (OutlinerString != None)
254 OutStreamer->getCommentOS() << ' ' << OutlinerString;
255 }
256
257 void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
258 {
259 const Function &F = MF->getFunction();
260 if (F.hasFnAttribute("patchable-function-entry")) {
261 unsigned Num;
262 if (F.getFnAttribute("patchable-function-entry")
263 .getValueAsString()
264 .getAsInteger(10, Num))
265 return;
266 emitNops(Num);
267 return;
268 }
269
270 emitSled(MI, SledKind::FUNCTION_ENTER);
271 }
272
273 void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
274 emitSled(MI, SledKind::FUNCTION_EXIT);
275 }
276
277 void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
278 emitSled(MI, SledKind::TAIL_CALL);
279 }
280
281 void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
282 static const int8_t NoopsInSledCount = 7;
283 // We want to emit the following pattern:
284 //
285 // .Lxray_sled_N:
286 // ALIGN
287 // B #32
288 // ; 7 NOP instructions (28 bytes)
289 // .tmpN
290 //
291 // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
292 // over the full 32 bytes (8 instructions) with the following pattern:
293 //
294 // STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
295 // LDR W0, #12 ; W0 := function ID
296 // LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
297 // BLR X16 ; call the tracing trampoline
298 // ;DATA: 32 bits of function ID
299 // ;DATA: lower 32 bits of the address of the trampoline
300 // ;DATA: higher 32 bits of the address of the trampoline
301 // LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
302 //
303 OutStreamer->emitCodeAlignment(4, &getSubtargetInfo());
304 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
305 OutStreamer->emitLabel(CurSled);
306 auto Target = OutContext.createTempSymbol();
307
308 // Emit "B #32" instruction, which jumps over the next 28 bytes.
309 // The operand has to be the number of 4-byte instructions to jump over,
310 // including the current instruction.
311 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
312
313 for (int8_t I = 0; I < NoopsInSledCount; I++)
314 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
315
316 OutStreamer->emitLabel(Target);
317 recordSled(CurSled, MI, Kind, 2);
318 }
319
320 void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
321 Register Reg = MI.getOperand(0).getReg();
322 bool IsShort =
323 MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES;
324 uint32_t AccessInfo = MI.getOperand(1).getImm();
325 MCSymbol *&Sym =
326 HwasanMemaccessSymbols[HwasanMemaccessTuple(Reg, IsShort, AccessInfo)];
327 if (!Sym) {
328 // FIXME: Make this work on non-ELF.
329 if (!TM.getTargetTriple().isOSBinFormatELF())
330 report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
331
332 std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
333 utostr(AccessInfo);
334 if (IsShort)
335 SymName += "_short_v2";
336 Sym = OutContext.getOrCreateSymbol(SymName);
337 }
338
339 EmitToStreamer(*OutStreamer,
340 MCInstBuilder(AArch64::BL)
341 .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
342 }
343
344 void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
345 if (HwasanMemaccessSymbols.empty())
346 return;
347
348 const Triple &TT = TM.getTargetTriple();
349 assert(TT.isOSBinFormatELF());
350 std::unique_ptr<MCSubtargetInfo> STI(
351 TM.getTarget().createMCSubtargetInfo(TT.str(), "", ""));
352 assert(STI && "Unable to create subtarget info");
353
354 MCSymbol *HwasanTagMismatchV1Sym =
355 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
356 MCSymbol *HwasanTagMismatchV2Sym =
357 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");
358
359 const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
360 MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
361 const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
362 MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);
363
364 for (auto &P : HwasanMemaccessSymbols) {
365 unsigned Reg = std::get<0>(P.first);
366 bool IsShort = std::get<1>(P.first);
367 uint32_t AccessInfo = std::get<2>(P.first);
368 const MCSymbolRefExpr *HwasanTagMismatchRef =
369 IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
370 MCSymbol *Sym = P.second;
371
372 bool HasMatchAllTag =
373 (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
374 uint8_t MatchAllTag =
375 (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
376 unsigned Size =
377 1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
378 bool CompileKernel =
379 (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;
380
381 OutStreamer->switchSection(OutContext.getELFSection(
382 ".text.hot", ELF::SHT_PROGBITS,
383 ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0, Sym->getName(),
384 /*IsComdat=*/true));
385
386 OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
387 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
388 OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
389 OutStreamer->emitLabel(Sym);
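    // Compute the shadow-memory index for the pointer: drop the tag byte and
    // divide by the 16-byte granule size (SBFM extracts bits [55:4]).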
390
391 OutStreamer->emitInstruction(MCInstBuilder(AArch64::SBFMXri)
392 .addReg(AArch64::X16)
393 .addReg(Reg)
394 .addImm(4)
395 .addImm(55),
396 *STI);
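    // Load the shadow tag byte for that granule; the shadow base register is
    // X20 for the short-granule ABI and X9 otherwise.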
397 OutStreamer->emitInstruction(
398 MCInstBuilder(AArch64::LDRBBroX)
399 .addReg(AArch64::W16)
400 .addReg(IsShort ? AArch64::X20 : AArch64::X9)
401 .addReg(AArch64::X16)
402 .addImm(0)
403 .addImm(0),
404 *STI);
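    // Compare the loaded shadow tag with the pointer's tag (top byte of Reg).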
405 OutStreamer->emitInstruction(
406 MCInstBuilder(AArch64::SUBSXrs)
407 .addReg(AArch64::XZR)
408 .addReg(AArch64::X16)
409 .addReg(Reg)
410 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)),
411 *STI);
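    // If the tags differ, branch to the mismatch/short-granule handler below;
    // otherwise fall through and return to the caller.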
412 MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
413 OutStreamer->emitInstruction(
414 MCInstBuilder(AArch64::Bcc)
415 .addImm(AArch64CC::NE)
416 .addExpr(MCSymbolRefExpr::create(HandleMismatchOrPartialSym,
417 OutContext)),
418 *STI);
419 MCSymbol *ReturnSym = OutContext.createTempSymbol();
420 OutStreamer->emitLabel(ReturnSym);
421 OutStreamer->emitInstruction(
422 MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);
423 OutStreamer->emitLabel(HandleMismatchOrPartialSym);
424
425 if (HasMatchAllTag) {
426 OutStreamer->emitInstruction(MCInstBuilder(AArch64::UBFMXri)
427 .addReg(AArch64::X16)
428 .addReg(Reg)
429 .addImm(56)
430 .addImm(63),
431 *STI);
432 OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSXri)
433 .addReg(AArch64::XZR)
434 .addReg(AArch64::X16)
435 .addImm(MatchAllTag)
436 .addImm(0),
437 *STI);
438 OutStreamer->emitInstruction(
439 MCInstBuilder(AArch64::Bcc)
440 .addImm(AArch64CC::EQ)
441 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)),
442 *STI);
443 }
444
445 if (IsShort) {
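      // Short-granule handling: a shadow value of 1..15 means only the first
      // N bytes of the granule are addressable and the real tag is stored in
      // the granule's last byte.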
446 OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSWri)
447 .addReg(AArch64::WZR)
448 .addReg(AArch64::W16)
449 .addImm(15)
450 .addImm(0),
451 *STI);
452 MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
453 OutStreamer->emitInstruction(
454 MCInstBuilder(AArch64::Bcc)
455 .addImm(AArch64CC::HI)
456 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
457 *STI);
458
459 OutStreamer->emitInstruction(
460 MCInstBuilder(AArch64::ANDXri)
461 .addReg(AArch64::X17)
462 .addReg(Reg)
463 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)),
464 *STI);
465 if (Size != 1)
466 OutStreamer->emitInstruction(MCInstBuilder(AArch64::ADDXri)
467 .addReg(AArch64::X17)
468 .addReg(AArch64::X17)
469 .addImm(Size - 1)
470 .addImm(0),
471 *STI);
472 OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSWrs)
473 .addReg(AArch64::WZR)
474 .addReg(AArch64::W16)
475 .addReg(AArch64::W17)
476 .addImm(0),
477 *STI);
478 OutStreamer->emitInstruction(
479 MCInstBuilder(AArch64::Bcc)
480 .addImm(AArch64CC::LS)
481 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
482 *STI);
483
484 OutStreamer->emitInstruction(
485 MCInstBuilder(AArch64::ORRXri)
486 .addReg(AArch64::X16)
487 .addReg(Reg)
488 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)),
489 *STI);
490 OutStreamer->emitInstruction(MCInstBuilder(AArch64::LDRBBui)
491 .addReg(AArch64::W16)
492 .addReg(AArch64::X16)
493 .addImm(0),
494 *STI);
495 OutStreamer->emitInstruction(
496 MCInstBuilder(AArch64::SUBSXrs)
497 .addReg(AArch64::XZR)
498 .addReg(AArch64::X16)
499 .addReg(Reg)
500 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)),
501 *STI);
502 OutStreamer->emitInstruction(
503 MCInstBuilder(AArch64::Bcc)
504 .addImm(AArch64CC::EQ)
505 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)),
506 *STI);
507
508 OutStreamer->emitLabel(HandleMismatchSym);
509 }
510
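    // Spill x0/x1 and a frame record (fp, lr) for the tag-mismatch handler:
    // the STP immediates below are scaled by 8, so this reserves 256 bytes
    // with the frame record stored at offset 232.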
511 OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPXpre)
512 .addReg(AArch64::SP)
513 .addReg(AArch64::X0)
514 .addReg(AArch64::X1)
515 .addReg(AArch64::SP)
516 .addImm(-32),
517 *STI);
518 OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPXi)
519 .addReg(AArch64::FP)
520 .addReg(AArch64::LR)
521 .addReg(AArch64::SP)
522 .addImm(29),
523 *STI);
524
525 if (Reg != AArch64::X0)
526 OutStreamer->emitInstruction(MCInstBuilder(AArch64::ORRXrs)
527 .addReg(AArch64::X0)
528 .addReg(AArch64::XZR)
529 .addReg(Reg)
530 .addImm(0),
531 *STI);
532 OutStreamer->emitInstruction(
533 MCInstBuilder(AArch64::MOVZXi)
534 .addReg(AArch64::X1)
535 .addImm(AccessInfo & HWASanAccessInfo::RuntimeMask)
536 .addImm(0),
537 *STI);
538
539 if (CompileKernel) {
540 // The Linux kernel's dynamic loader doesn't support GOT relative
541 // relocations, but it doesn't support late binding either, so just call
542 // the function directly.
543 OutStreamer->emitInstruction(
544 MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef), *STI);
545 } else {
546 // Intentionally load the GOT entry and branch to it, rather than possibly
547 // late binding the function, which may clobber the registers before we
548 // have a chance to save them.
549 OutStreamer->emitInstruction(
550 MCInstBuilder(AArch64::ADRP)
551 .addReg(AArch64::X16)
552 .addExpr(AArch64MCExpr::create(
553 HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_PAGE,
554 OutContext)),
555 *STI);
556 OutStreamer->emitInstruction(
557 MCInstBuilder(AArch64::LDRXui)
558 .addReg(AArch64::X16)
559 .addReg(AArch64::X16)
560 .addExpr(AArch64MCExpr::create(
561 HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_LO12,
562 OutContext)),
563 *STI);
564 OutStreamer->emitInstruction(
565 MCInstBuilder(AArch64::BR).addReg(AArch64::X16), *STI);
566 }
567 }
568 }
569
570 void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
571 emitHwasanMemaccessSymbols(M);
572
573 const Triple &TT = TM.getTargetTriple();
574 if (TT.isOSBinFormatMachO()) {
575 // Funny Darwin hack: This flag tells the linker that no global symbols
576 // contain code that falls through to other global symbols (e.g. the obvious
577 // implementation of multiple entry points). If this doesn't occur, the
578 // linker can safely perform dead code stripping. Since LLVM never
579 // generates code that does this, it is always safe to set.
580 OutStreamer->emitAssemblerFlag(MCAF_SubsectionsViaSymbols);
581 }
582
583 // Emit stack and fault map information.
584 emitStackMaps(SM);
585 FM.serializeToFaultMapSection();
586
587 }
588
589 void AArch64AsmPrinter::emitLOHs() {
590 SmallVector<MCSymbol *, 3> MCArgs;
591
592 for (const auto &D : AArch64FI->getLOHContainer()) {
593 for (const MachineInstr *MI : D.getArgs()) {
594 MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
595 assert(LabelIt != LOHInstToLabel.end() &&
596 "Label hasn't been inserted for LOH related instruction");
597 MCArgs.push_back(LabelIt->second);
598 }
599 OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
600 MCArgs.clear();
601 }
602 }
603
604 void AArch64AsmPrinter::emitFunctionBodyEnd() {
605 if (!AArch64FI->getLOHRelated().empty())
606 emitLOHs();
607 }
608
609 /// GetCPISymbol - Return the symbol for the specified constant pool entry.
610 MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
611 // Darwin uses a linker-private symbol name for constant-pools (to
612 // avoid addends on the relocation?), ELF has no such concept and
613 // uses a normal private symbol.
614 if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
615 return OutContext.getOrCreateSymbol(
616 Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
617 Twine(getFunctionNumber()) + "_" + Twine(CPID));
618
619 return AsmPrinter::GetCPISymbol(CPID);
620 }
621
622 void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
623 raw_ostream &O) {
624 const MachineOperand &MO = MI->getOperand(OpNum);
625 switch (MO.getType()) {
626 default:
627 llvm_unreachable("<unknown operand type>");
628 case MachineOperand::MO_Register: {
629 Register Reg = MO.getReg();
630 assert(Register::isPhysicalRegister(Reg));
631 assert(!MO.getSubReg() && "Subregs should be eliminated!");
632 O << AArch64InstPrinter::getRegisterName(Reg);
633 break;
634 }
635 case MachineOperand::MO_Immediate: {
636 O << MO.getImm();
637 break;
638 }
639 case MachineOperand::MO_GlobalAddress: {
640 PrintSymbolOperand(MO, O);
641 break;
642 }
643 case MachineOperand::MO_BlockAddress: {
644 MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
645 Sym->print(O, MAI);
646 break;
647 }
648 }
649 }
650
651 bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
652 raw_ostream &O) {
653 Register Reg = MO.getReg();
654 switch (Mode) {
655 default:
656 return true; // Unknown mode.
657 case 'w':
658 Reg = getWRegFromXReg(Reg);
659 break;
660 case 'x':
661 Reg = getXRegFromWReg(Reg);
662 break;
663 case 't':
664 Reg = getXRegFromXRegTuple(Reg);
665 break;
666 }
667
668 O << AArch64InstPrinter::getRegisterName(Reg);
669 return false;
670 }
671
672 // Prints the register in MO as the register at the same encoding offset
673 // within the class RC. This should not be used for cross-class
674 // printing.
675 bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
676 const TargetRegisterClass *RC,
677 unsigned AltName, raw_ostream &O) {
678 assert(MO.isReg() && "Should only get here with a register!");
679 const TargetRegisterInfo *RI = STI->getRegisterInfo();
680 Register Reg = MO.getReg();
681 unsigned RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
682 if (!RI->regsOverlap(RegToPrint, Reg))
683 return true;
684 O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
685 return false;
686 }
687
688 bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
689 const char *ExtraCode, raw_ostream &O) {
690 const MachineOperand &MO = MI->getOperand(OpNum);
691
692 // First try the generic code, which knows about modifiers like 'c' and 'n'.
693 if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
694 return false;
695
696 // Does this asm operand have a single letter operand modifier?
697 if (ExtraCode && ExtraCode[0]) {
698 if (ExtraCode[1] != 0)
699 return true; // Unknown modifier.
700
701 switch (ExtraCode[0]) {
702 default:
703 return true; // Unknown modifier.
704 case 'w': // Print W register
705 case 'x': // Print X register
706 if (MO.isReg())
707 return printAsmMRegister(MO, ExtraCode[0], O);
708 if (MO.isImm() && MO.getImm() == 0) {
709 unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
710 O << AArch64InstPrinter::getRegisterName(Reg);
711 return false;
712 }
713 printOperand(MI, OpNum, O);
714 return false;
715 case 'b': // Print B register.
716 case 'h': // Print H register.
717 case 's': // Print S register.
718 case 'd': // Print D register.
719 case 'q': // Print Q register.
720 case 'z': // Print Z register.
721 if (MO.isReg()) {
722 const TargetRegisterClass *RC;
723 switch (ExtraCode[0]) {
724 case 'b':
725 RC = &AArch64::FPR8RegClass;
726 break;
727 case 'h':
728 RC = &AArch64::FPR16RegClass;
729 break;
730 case 's':
731 RC = &AArch64::FPR32RegClass;
732 break;
733 case 'd':
734 RC = &AArch64::FPR64RegClass;
735 break;
736 case 'q':
737 RC = &AArch64::FPR128RegClass;
738 break;
739 case 'z':
740 RC = &AArch64::ZPRRegClass;
741 break;
742 default:
743 return true;
744 }
745 return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
746 }
747 printOperand(MI, OpNum, O);
748 return false;
749 }
750 }
751
752 // According to ARM, we should emit x and v registers unless we have a
753 // modifier.
754 if (MO.isReg()) {
755 Register Reg = MO.getReg();
756
757 // If this is a w or x register, print an x register.
758 if (AArch64::GPR32allRegClass.contains(Reg) ||
759 AArch64::GPR64allRegClass.contains(Reg))
760 return printAsmMRegister(MO, 'x', O);
761
762 // If this is an x register tuple, print an x register.
763 if (AArch64::GPR64x8ClassRegClass.contains(Reg))
764 return printAsmMRegister(MO, 't', O);
765
766 unsigned AltName = AArch64::NoRegAltName;
767 const TargetRegisterClass *RegClass;
768 if (AArch64::ZPRRegClass.contains(Reg)) {
769 RegClass = &AArch64::ZPRRegClass;
770 } else if (AArch64::PPRRegClass.contains(Reg)) {
771 RegClass = &AArch64::PPRRegClass;
772 } else {
773 RegClass = &AArch64::FPR128RegClass;
774 AltName = AArch64::vreg;
775 }
776
777 // If this is a b, h, s, d, or q register, print it as a v register.
778 return printAsmRegInClass(MO, RegClass, AltName, O);
779 }
780
781 printOperand(MI, OpNum, O);
782 return false;
783 }
784
785 bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
786 unsigned OpNum,
787 const char *ExtraCode,
788 raw_ostream &O) {
789 if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
790 return true; // Unknown modifier.
791
792 const MachineOperand &MO = MI->getOperand(OpNum);
793 assert(MO.isReg() && "unexpected inline asm memory operand");
794 O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
795 return false;
796 }
797
798 void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
799 raw_ostream &OS) {
800 unsigned NOps = MI->getNumOperands();
801 assert(NOps == 4);
802 OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
803 // cast away const; DIetc do not take const operands for some reason.
804 OS << MI->getDebugVariable()->getName();
805 OS << " <- ";
806 // Frame address. Currently handles register +- offset only.
807 assert(MI->isIndirectDebugValue());
808 OS << '[';
809 for (unsigned I = 0, E = std::distance(MI->debug_operands().begin(),
810 MI->debug_operands().end());
811 I < E; ++I) {
812 if (I != 0)
813 OS << ", ";
814 printOperand(MI, I, OS);
815 }
816 OS << ']';
817 OS << "+";
818 printOperand(MI, NOps - 2, OS);
819 }
820
821 void AArch64AsmPrinter::emitJumpTableInfo() {
822 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
823 if (!MJTI) return;
824
825 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
826 if (JT.empty()) return;
827
828 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
829 MCSection *ReadOnlySec = TLOF.getSectionForJumpTable(MF->getFunction(), TM);
830 OutStreamer->switchSection(ReadOnlySec);
831
832 auto AFI = MF->getInfo<AArch64FunctionInfo>();
833 for (unsigned JTI = 0, e = JT.size(); JTI != e; ++JTI) {
834 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
835
836 // If this jump table was deleted, ignore it.
837 if (JTBBs.empty()) continue;
838
839 unsigned Size = AFI->getJumpTableEntrySize(JTI);
840 emitAlignment(Align(Size));
841 OutStreamer->emitLabel(GetJTISymbol(JTI));
842
843 const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
844 const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);
845
846 for (auto *JTBB : JTBBs) {
847 const MCExpr *Value =
848 MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);
849
850 // Each entry is:
851 // .byte/.hword (LBB - Lbase)>>2
852 // or plain:
853 // .word LBB - Lbase
854 Value = MCBinaryExpr::createSub(Value, Base, OutContext);
855 if (Size != 4)
856 Value = MCBinaryExpr::createLShr(
857 Value, MCConstantExpr::create(2, OutContext), OutContext);
858
859 OutStreamer->emitValue(Value, Size);
860 }
861 }
862 }
863
864 void AArch64AsmPrinter::emitFunctionEntryLabel() {
865 if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
866 MF->getFunction().getCallingConv() ==
867 CallingConv::AArch64_SVE_VectorCall ||
868 MF->getInfo<AArch64FunctionInfo>()->isSVECC()) {
869 auto *TS =
870 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
871 TS->emitDirectiveVariantPCS(CurrentFnSym);
872 }
873
874 return AsmPrinter::emitFunctionEntryLabel();
875 }
876
877 /// Small jump tables contain an unsigned byte or half, representing the offset
878 /// from the lowest-addressed possible destination to the desired basic
879 /// block. Since all instructions are 4-byte aligned, this is further compressed
880 /// by counting in instructions rather than bytes (i.e. divided by 4). So, to
881 /// materialize the correct destination we need:
882 ///
883 /// adr xDest, .LBB0_0
884 /// ldrb wScratch, [xTable, xEntry] (with "lsl #1" for ldrh).
885 /// add xDest, xDest, xScratch (with "lsl #2" for smaller entries)
886 void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
887 const llvm::MachineInstr &MI) {
888 Register DestReg = MI.getOperand(0).getReg();
889 Register ScratchReg = MI.getOperand(1).getReg();
890 Register ScratchRegW =
891 STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
892 Register TableReg = MI.getOperand(2).getReg();
893 Register EntryReg = MI.getOperand(3).getReg();
894 int JTIdx = MI.getOperand(4).getIndex();
895 int Size = AArch64FI->getJumpTableEntrySize(JTIdx);
896
897 // This has to be first because the compression pass bases its reachability
898 // calculations on the start of the JumpTableDest instruction.
899 auto Label =
900 MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);
901
902 // If we don't already have a symbol to use as the base, use the ADR
903 // instruction itself.
904 if (!Label) {
905 Label = MF->getContext().createTempSymbol();
906 AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
907 OutStreamer.emitLabel(Label);
908 }
909
910 auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
911 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
912 .addReg(DestReg)
913 .addExpr(LabelExpr));
914
915 // Load the number of instruction-steps to offset from the label.
916 unsigned LdrOpcode;
917 switch (Size) {
918 case 1: LdrOpcode = AArch64::LDRBBroX; break;
919 case 2: LdrOpcode = AArch64::LDRHHroX; break;
920 case 4: LdrOpcode = AArch64::LDRSWroX; break;
921 default:
922 llvm_unreachable("Unknown jump table size");
923 }
924
925 EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
926 .addReg(Size == 4 ? ScratchReg : ScratchRegW)
927 .addReg(TableReg)
928 .addReg(EntryReg)
929 .addImm(0)
930 .addImm(Size == 1 ? 0 : 1));
931
932 // Add to the already materialized base label address, multiplying by 4 if
933 // compressed.
934 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
935 .addReg(DestReg)
936 .addReg(DestReg)
937 .addReg(ScratchReg)
938 .addImm(Size == 4 ? 0 : 2));
939 }
940
941 void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
942 const llvm::MachineInstr &MI) {
943 unsigned Opcode = MI.getOpcode();
944 assert(STI->hasMOPS());
945 assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
946
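  // Each MOPS pseudo expands to its prologue/main/epilogue instruction triple.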
947 const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
948 if (Opcode == AArch64::MOPSMemoryCopyPseudo)
949 return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
950 if (Opcode == AArch64::MOPSMemoryMovePseudo)
951 return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
952 if (Opcode == AArch64::MOPSMemorySetPseudo)
953 return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
954 if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
955 return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
956 llvm_unreachable("Unhandled memory operation pseudo");
957 }();
958 const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
959 Opcode == AArch64::MOPSMemorySetTaggingPseudo;
960
961 for (auto Op : Ops) {
962 int i = 0;
963 auto MCIB = MCInstBuilder(Op);
964 // Destination registers
965 MCIB.addReg(MI.getOperand(i++).getReg());
966 MCIB.addReg(MI.getOperand(i++).getReg());
967 if (!IsSet)
968 MCIB.addReg(MI.getOperand(i++).getReg());
969 // Input registers
970 MCIB.addReg(MI.getOperand(i++).getReg());
971 MCIB.addReg(MI.getOperand(i++).getReg());
972 MCIB.addReg(MI.getOperand(i++).getReg());
973
974 EmitToStreamer(OutStreamer, MCIB);
975 }
976 }
977
978 void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
979 const MachineInstr &MI) {
980 unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();
981
982 auto &Ctx = OutStreamer.getContext();
983 MCSymbol *MILabel = Ctx.createTempSymbol();
984 OutStreamer.emitLabel(MILabel);
985
986 SM.recordStackMap(*MILabel, MI);
987 assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
988
989 // Scan ahead to trim the shadow.
990 const MachineBasicBlock &MBB = *MI.getParent();
991 MachineBasicBlock::const_iterator MII(MI);
992 ++MII;
993 while (NumNOPBytes > 0) {
994 if (MII == MBB.end() || MII->isCall() ||
995 MII->getOpcode() == AArch64::DBG_VALUE ||
996 MII->getOpcode() == TargetOpcode::PATCHPOINT ||
997 MII->getOpcode() == TargetOpcode::STACKMAP)
998 break;
999 ++MII;
1000 NumNOPBytes -= 4;
1001 }
1002
1003 // Emit nops.
1004 for (unsigned i = 0; i < NumNOPBytes; i += 4)
1005 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1006 }
1007
1008 // Lower a patchpoint of the form:
1009 // [<def>], <id>, <numBytes>, <target>, <numArgs>
1010 void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1011 const MachineInstr &MI) {
1012 auto &Ctx = OutStreamer.getContext();
1013 MCSymbol *MILabel = Ctx.createTempSymbol();
1014 OutStreamer.emitLabel(MILabel);
1015 SM.recordPatchPoint(*MILabel, MI);
1016
1017 PatchPointOpers Opers(&MI);
1018
1019 int64_t CallTarget = Opers.getCallTarget().getImm();
1020 unsigned EncodedBytes = 0;
1021 if (CallTarget) {
1022 assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
1023 "High 16 bits of call target should be zero.");
1024 Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
1025 EncodedBytes = 16;
1026 // Materialize the jump address:
1027 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVZXi)
1028 .addReg(ScratchReg)
1029 .addImm((CallTarget >> 32) & 0xFFFF)
1030 .addImm(32));
1031 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKXi)
1032 .addReg(ScratchReg)
1033 .addReg(ScratchReg)
1034 .addImm((CallTarget >> 16) & 0xFFFF)
1035 .addImm(16));
1036 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKXi)
1037 .addReg(ScratchReg)
1038 .addReg(ScratchReg)
1039 .addImm(CallTarget & 0xFFFF)
1040 .addImm(0));
1041 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
1042 }
1043 // Emit padding.
1044 unsigned NumBytes = Opers.getNumPatchBytes();
1045 assert(NumBytes >= EncodedBytes &&
1046 "Patchpoint can't request size less than the length of a call.");
1047 assert((NumBytes - EncodedBytes) % 4 == 0 &&
1048 "Invalid number of NOP bytes requested!");
1049 for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
1050 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1051 }
1052
1053 void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1054 const MachineInstr &MI) {
1055 StatepointOpers SOpers(&MI);
1056 if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
1057 assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1058 for (unsigned i = 0; i < PatchBytes; i += 4)
1059 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1060 } else {
1061 // Lower call target and choose correct opcode
1062 const MachineOperand &CallTarget = SOpers.getCallTarget();
1063 MCOperand CallTargetMCOp;
1064 unsigned CallOpcode;
1065 switch (CallTarget.getType()) {
1066 case MachineOperand::MO_GlobalAddress:
1067 case MachineOperand::MO_ExternalSymbol:
1068 MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
1069 CallOpcode = AArch64::BL;
1070 break;
1071 case MachineOperand::MO_Immediate:
1072 CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
1073 CallOpcode = AArch64::BL;
1074 break;
1075 case MachineOperand::MO_Register:
1076 CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
1077 CallOpcode = AArch64::BLR;
1078 break;
1079 default:
1080 llvm_unreachable("Unsupported operand type in statepoint call target");
1081 break;
1082 }
1083
1084 EmitToStreamer(OutStreamer,
1085 MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
1086 }
1087
1088 auto &Ctx = OutStreamer.getContext();
1089 MCSymbol *MILabel = Ctx.createTempSymbol();
1090 OutStreamer.emitLabel(MILabel);
1091 SM.recordStatepoint(*MILabel, MI);
1092 }
1093
1094 void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
1095 // FAULTING_LOAD_OP <def>, <faulting type>, <MBB handler>,
1096 // <opcode>, <operands>
1097
1098 Register DefRegister = FaultingMI.getOperand(0).getReg();
1099 FaultMaps::FaultKind FK =
1100 static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
1101 MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
1102 unsigned Opcode = FaultingMI.getOperand(3).getImm();
1103 unsigned OperandsBeginIdx = 4;
1104
1105 auto &Ctx = OutStreamer->getContext();
1106 MCSymbol *FaultingLabel = Ctx.createTempSymbol();
1107 OutStreamer->emitLabel(FaultingLabel);
1108
1109 assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
1110 FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);
1111
1112 MCInst MI;
1113 MI.setOpcode(Opcode);
1114
1115 if (DefRegister != (Register)0)
1116 MI.addOperand(MCOperand::createReg(DefRegister));
1117
1118 for (auto I = FaultingMI.operands_begin() + OperandsBeginIdx,
1119 E = FaultingMI.operands_end();
1120 I != E; ++I) {
1121 MCOperand Dest;
1122 lowerOperand(*I, Dest);
1123 MI.addOperand(Dest);
1124 }
1125
1126 OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
1127 OutStreamer->emitInstruction(MI, getSubtargetInfo());
1128 }
1129
1130 void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
1131 Register DestReg = MI.getOperand(0).getReg();
1132 if (STI->hasZeroCycleZeroingFP() && !STI->hasZeroCycleZeroingFPWorkaround() &&
1133 STI->hasNEON()) {
1134 // Convert H/S register to corresponding D register
1135 if (AArch64::H0 <= DestReg && DestReg <= AArch64::H31)
1136 DestReg = AArch64::D0 + (DestReg - AArch64::H0);
1137 else if (AArch64::S0 <= DestReg && DestReg <= AArch64::S31)
1138 DestReg = AArch64::D0 + (DestReg - AArch64::S0);
1139 else
1140 assert(AArch64::D0 <= DestReg && DestReg <= AArch64::D31);
1141
1142 MCInst MOVI;
1143 MOVI.setOpcode(AArch64::MOVID);
1144 MOVI.addOperand(MCOperand::createReg(DestReg));
1145 MOVI.addOperand(MCOperand::createImm(0));
1146 EmitToStreamer(*OutStreamer, MOVI);
1147 } else {
1148 MCInst FMov;
1149 switch (MI.getOpcode()) {
1150 default: llvm_unreachable("Unexpected opcode");
1151 case AArch64::FMOVH0:
1152 FMov.setOpcode(AArch64::FMOVWHr);
1153 FMov.addOperand(MCOperand::createReg(DestReg));
1154 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1155 break;
1156 case AArch64::FMOVS0:
1157 FMov.setOpcode(AArch64::FMOVWSr);
1158 FMov.addOperand(MCOperand::createReg(DestReg));
1159 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1160 break;
1161 case AArch64::FMOVD0:
1162 FMov.setOpcode(AArch64::FMOVXDr);
1163 FMov.addOperand(MCOperand::createReg(DestReg));
1164 FMov.addOperand(MCOperand::createReg(AArch64::XZR));
1165 break;
1166 }
1167 EmitToStreamer(*OutStreamer, FMov);
1168 }
1169 }
1170
1171 // Simple pseudo-instructions have their lowering (with expansion to real
1172 // instructions) auto-generated.
1173 #include "AArch64GenMCPseudoLowering.inc"
1174
1175 void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
1176 AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());
1177
1178 // Do any auto-generated pseudo lowerings.
1179 if (emitPseudoExpansionLowering(*OutStreamer, MI))
1180 return;
1181
1182 if (MI->getOpcode() == AArch64::ADRP) {
1183 for (auto &Opd : MI->operands()) {
1184 if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
1185 "swift_async_extendedFramePointerFlags") {
1186 ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
1187 }
1188 }
1189 }
1190
1191 if (AArch64FI->getLOHRelated().count(MI)) {
1192 // Generate a label for LOH related instruction
1193 MCSymbol *LOHLabel = createTempSymbol("loh");
1194 // Associate the instruction with the label
1195 LOHInstToLabel[MI] = LOHLabel;
1196 OutStreamer->emitLabel(LOHLabel);
1197 }
1198
1199 AArch64TargetStreamer *TS =
1200 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
1201 // Do any manual lowerings.
1202 switch (MI->getOpcode()) {
1203 default:
1204 break;
1205 case AArch64::HINT: {
1206 // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
1207 // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
1208 // non-empty. If MI is the initial BTI, place the
1209 // __patchable_function_entries label after BTI.
1210 if (CurrentPatchableFunctionEntrySym &&
1211 CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
1212 MI == &MF->front().front()) {
1213 int64_t Imm = MI->getOperand(0).getImm();
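      // BTI c, BTI j and BTI jc are HINT #34, #36 and #38 respectively: bit 5
      // set plus at least one of bits 1-2.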
1214 if ((Imm & 32) && (Imm & 6)) {
1215 MCInst Inst;
1216 MCInstLowering.Lower(MI, Inst);
1217 EmitToStreamer(*OutStreamer, Inst);
1218 CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
1219 OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
1220 return;
1221 }
1222 }
1223 break;
1224 }
1225 case AArch64::MOVMCSym: {
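    // Materialize the symbol's value with a MOVZ of the high half-word
    // followed by a MOVK of the low half-word.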
1226 Register DestReg = MI->getOperand(0).getReg();
1227 const MachineOperand &MO_Sym = MI->getOperand(1);
1228 MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
1229 MCOperand Hi_MCSym, Lo_MCSym;
1230
1231 Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
1232 Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
1233
1234 MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
1235 MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
1236
1237 MCInst MovZ;
1238 MovZ.setOpcode(AArch64::MOVZXi);
1239 MovZ.addOperand(MCOperand::createReg(DestReg));
1240 MovZ.addOperand(Hi_MCSym);
1241 MovZ.addOperand(MCOperand::createImm(16));
1242 EmitToStreamer(*OutStreamer, MovZ);
1243
1244 MCInst MovK;
1245 MovK.setOpcode(AArch64::MOVKXi);
1246 MovK.addOperand(MCOperand::createReg(DestReg));
1247 MovK.addOperand(MCOperand::createReg(DestReg));
1248 MovK.addOperand(Lo_MCSym);
1249 MovK.addOperand(MCOperand::createImm(0));
1250 EmitToStreamer(*OutStreamer, MovK);
1251 return;
1252 }
1253 case AArch64::MOVIv2d_ns:
1254 // If the target has <rdar://problem/16473581>, lower this
1255 // instruction to movi.16b instead.
1256 if (STI->hasZeroCycleZeroingFPWorkaround() &&
1257 MI->getOperand(1).getImm() == 0) {
1258 MCInst TmpInst;
1259 TmpInst.setOpcode(AArch64::MOVIv16b_ns);
1260 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
1261 TmpInst.addOperand(MCOperand::createImm(MI->getOperand(1).getImm()));
1262 EmitToStreamer(*OutStreamer, TmpInst);
1263 return;
1264 }
1265 break;
1266
1267 case AArch64::DBG_VALUE:
1268 case AArch64::DBG_VALUE_LIST:
1269 if (isVerbose() && OutStreamer->hasRawTextSupport()) {
1270 SmallString<128> TmpStr;
1271 raw_svector_ostream OS(TmpStr);
1272 PrintDebugValueComment(MI, OS);
1273 OutStreamer->emitRawText(StringRef(OS.str()));
1274 }
1275 return;
1276
1277 case AArch64::EMITBKEY: {
1278 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
1279 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
1280 ExceptionHandlingType != ExceptionHandling::ARM)
1281 return;
1282
1283 if (getFunctionCFISectionType(*MF) == CFISection::None)
1284 return;
1285
1286 OutStreamer->emitCFIBKeyFrame();
1287 return;
1288 }
1289
1290 case AArch64::EMITMTETAGGED: {
1291 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
1292 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
1293 ExceptionHandlingType != ExceptionHandling::ARM)
1294 return;
1295
1296 if (getFunctionCFISectionType(*MF) != CFISection::None)
1297 OutStreamer->emitCFIMTETaggedFrame();
1298 return;
1299 }
1300
1301 // Tail calls use pseudo instructions so they have the proper code-gen
1302 // attributes (isCall, isReturn, etc.). We lower them to the real
1303 // instruction here.
1304 case AArch64::TCRETURNri:
1305 case AArch64::TCRETURNriBTI:
1306 case AArch64::TCRETURNriALL: {
1307 MCInst TmpInst;
1308 TmpInst.setOpcode(AArch64::BR);
1309 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
1310 EmitToStreamer(*OutStreamer, TmpInst);
1311 return;
1312 }
1313 case AArch64::TCRETURNdi: {
1314 MCOperand Dest;
1315 MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
1316 MCInst TmpInst;
1317 TmpInst.setOpcode(AArch64::B);
1318 TmpInst.addOperand(Dest);
1319 EmitToStreamer(*OutStreamer, TmpInst);
1320 return;
1321 }
1322 case AArch64::SpeculationBarrierISBDSBEndBB: {
1323 // Print DSB SYS + ISB
1324 MCInst TmpInstDSB;
1325 TmpInstDSB.setOpcode(AArch64::DSB);
1326 TmpInstDSB.addOperand(MCOperand::createImm(0xf));
1327 EmitToStreamer(*OutStreamer, TmpInstDSB);
1328 MCInst TmpInstISB;
1329 TmpInstISB.setOpcode(AArch64::ISB);
1330 TmpInstISB.addOperand(MCOperand::createImm(0xf));
1331 EmitToStreamer(*OutStreamer, TmpInstISB);
1332 return;
1333 }
1334 case AArch64::SpeculationBarrierSBEndBB: {
1335 // Print SB
1336 MCInst TmpInstSB;
1337 TmpInstSB.setOpcode(AArch64::SB);
1338 EmitToStreamer(*OutStreamer, TmpInstSB);
1339 return;
1340 }
1341 case AArch64::TLSDESC_CALLSEQ: {
1342 /// lower this to:
1343 /// adrp x0, :tlsdesc:var
1344 /// ldr x1, [x0, #:tlsdesc_lo12:var]
1345 /// add x0, x0, #:tlsdesc_lo12:var
1346 /// .tlsdesccall var
1347 /// blr x1
1348 /// (TPIDR_EL0 offset now in x0)
1349 const MachineOperand &MO_Sym = MI->getOperand(0);
1350 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
1351 MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
1352 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
1353 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
1354 MCInstLowering.lowerOperand(MO_Sym, Sym);
1355 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
1356 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
1357
1358 MCInst Adrp;
1359 Adrp.setOpcode(AArch64::ADRP);
1360 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
1361 Adrp.addOperand(SymTLSDesc);
1362 EmitToStreamer(*OutStreamer, Adrp);
1363
1364 MCInst Ldr;
1365 if (STI->isTargetILP32()) {
1366 Ldr.setOpcode(AArch64::LDRWui);
1367 Ldr.addOperand(MCOperand::createReg(AArch64::W1));
1368 } else {
1369 Ldr.setOpcode(AArch64::LDRXui);
1370 Ldr.addOperand(MCOperand::createReg(AArch64::X1));
1371 }
1372 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
1373 Ldr.addOperand(SymTLSDescLo12);
1374 Ldr.addOperand(MCOperand::createImm(0));
1375 EmitToStreamer(*OutStreamer, Ldr);
1376
1377 MCInst Add;
1378 if (STI->isTargetILP32()) {
1379 Add.setOpcode(AArch64::ADDWri);
1380 Add.addOperand(MCOperand::createReg(AArch64::W0));
1381 Add.addOperand(MCOperand::createReg(AArch64::W0));
1382 } else {
1383 Add.setOpcode(AArch64::ADDXri);
1384 Add.addOperand(MCOperand::createReg(AArch64::X0));
1385 Add.addOperand(MCOperand::createReg(AArch64::X0));
1386 }
1387 Add.addOperand(SymTLSDescLo12);
1388 Add.addOperand(MCOperand::createImm(AArch64_AM::getShiftValue(0)));
1389 EmitToStreamer(*OutStreamer, Add);
1390
1391 // Emit a relocation-annotation. This expands to no code, but requests
1392 // the following instruction gets an R_AARCH64_TLSDESC_CALL.
1393 MCInst TLSDescCall;
1394 TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
1395 TLSDescCall.addOperand(Sym);
1396 EmitToStreamer(*OutStreamer, TLSDescCall);
1397
1398 MCInst Blr;
1399 Blr.setOpcode(AArch64::BLR);
1400 Blr.addOperand(MCOperand::createReg(AArch64::X1));
1401 EmitToStreamer(*OutStreamer, Blr);
1402
1403 return;
1404 }
1405
1406 case AArch64::JumpTableDest32:
1407 case AArch64::JumpTableDest16:
1408 case AArch64::JumpTableDest8:
1409 LowerJumpTableDest(*OutStreamer, *MI);
1410 return;
1411
1412 case AArch64::FMOVH0:
1413 case AArch64::FMOVS0:
1414 case AArch64::FMOVD0:
1415 emitFMov0(*MI);
1416 return;
1417
1418 case AArch64::MOPSMemoryCopyPseudo:
1419 case AArch64::MOPSMemoryMovePseudo:
1420 case AArch64::MOPSMemorySetPseudo:
1421 case AArch64::MOPSMemorySetTaggingPseudo:
1422 LowerMOPS(*OutStreamer, *MI);
1423 return;
1424
1425 case TargetOpcode::STACKMAP:
1426 return LowerSTACKMAP(*OutStreamer, SM, *MI);
1427
1428 case TargetOpcode::PATCHPOINT:
1429 return LowerPATCHPOINT(*OutStreamer, SM, *MI);
1430
1431 case TargetOpcode::STATEPOINT:
1432 return LowerSTATEPOINT(*OutStreamer, SM, *MI);
1433
1434 case TargetOpcode::FAULTING_OP:
1435 return LowerFAULTING_OP(*MI);
1436
1437 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
1438 LowerPATCHABLE_FUNCTION_ENTER(*MI);
1439 return;
1440
1441 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
1442 LowerPATCHABLE_FUNCTION_EXIT(*MI);
1443 return;
1444
1445 case TargetOpcode::PATCHABLE_TAIL_CALL:
1446 LowerPATCHABLE_TAIL_CALL(*MI);
1447 return;
1448
1449 case AArch64::HWASAN_CHECK_MEMACCESS:
1450 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
1451 LowerHWASAN_CHECK_MEMACCESS(*MI);
1452 return;
1453
1454 case AArch64::SEH_StackAlloc:
1455 TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
1456 return;
1457
1458 case AArch64::SEH_SaveFPLR:
1459 TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
1460 return;
1461
1462 case AArch64::SEH_SaveFPLR_X:
1463 assert(MI->getOperand(0).getImm() < 0 &&
1464 "Pre increment SEH opcode must have a negative offset");
1465 TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
1466 return;
1467
1468 case AArch64::SEH_SaveReg:
1469 TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
1470 MI->getOperand(1).getImm());
1471 return;
1472
1473 case AArch64::SEH_SaveReg_X:
1474 assert(MI->getOperand(1).getImm() < 0 &&
1475 "Pre increment SEH opcode must have a negative offset");
1476 TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
1477 -MI->getOperand(1).getImm());
1478 return;
1479
1480 case AArch64::SEH_SaveRegP:
1481 if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
1482 MI->getOperand(0).getImm() <= 28) {
1483 assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
1484 "Register paired with LR must be odd");
1485 TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
1486 MI->getOperand(2).getImm());
1487 return;
1488 }
1489 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
1490 "Non-consecutive registers not allowed for save_regp");
1491 TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
1492 MI->getOperand(2).getImm());
1493 return;
1494
1495 case AArch64::SEH_SaveRegP_X:
1496 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
1497 "Non-consecutive registers not allowed for save_regp_x");
1498 assert(MI->getOperand(2).getImm() < 0 &&
1499 "Pre increment SEH opcode must have a negative offset");
1500 TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
1501 -MI->getOperand(2).getImm());
1502 return;
1503
1504 case AArch64::SEH_SaveFReg:
1505 TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
1506 MI->getOperand(1).getImm());
1507 return;
1508
1509 case AArch64::SEH_SaveFReg_X:
1510 assert(MI->getOperand(1).getImm() < 0 &&
1511 "Pre increment SEH opcode must have a negative offset");
1512 TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
1513 -MI->getOperand(1).getImm());
1514 return;
1515
1516 case AArch64::SEH_SaveFRegP:
1517 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
1518 "Non-consecutive registers not allowed for save_regp");
1519 TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
1520 MI->getOperand(2).getImm());
1521 return;
1522
1523 case AArch64::SEH_SaveFRegP_X:
1524 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
1525 "Non-consecutive registers not allowed for save_regp_x");
1526 assert(MI->getOperand(2).getImm() < 0 &&
1527 "Pre increment SEH opcode must have a negative offset");
1528 TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
1529 -MI->getOperand(2).getImm());
1530 return;
1531
1532 case AArch64::SEH_SetFP:
1533 TS->emitARM64WinCFISetFP();
1534 return;
1535
1536 case AArch64::SEH_AddFP:
1537 TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
1538 return;
1539
1540 case AArch64::SEH_Nop:
1541 TS->emitARM64WinCFINop();
1542 return;
1543
1544 case AArch64::SEH_PrologEnd:
1545 TS->emitARM64WinCFIPrologEnd();
1546 return;
1547
1548 case AArch64::SEH_EpilogStart:
1549 TS->emitARM64WinCFIEpilogStart();
1550 return;
1551
1552 case AArch64::SEH_EpilogEnd:
1553 TS->emitARM64WinCFIEpilogEnd();
1554 return;
1555 }
1556
1557 // Finally, do the automated lowerings for everything else.
1558 MCInst TmpInst;
1559 MCInstLowering.Lower(MI, TmpInst);
1560 EmitToStreamer(*OutStreamer, TmpInst);
1561 }
1562
1563 // Force static initialization.
1564 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmPrinter() {
1565 RegisterAsmPrinter<AArch64AsmPrinter> X(getTheAArch64leTarget());
1566 RegisterAsmPrinter<AArch64AsmPrinter> Y(getTheAArch64beTarget());
1567 RegisterAsmPrinter<AArch64AsmPrinter> Z(getTheARM64Target());
1568 RegisterAsmPrinter<AArch64AsmPrinter> W(getTheARM64_32Target());
1569 RegisterAsmPrinter<AArch64AsmPrinter> V(getTheAArch64_32Target());
1570 }
1571