//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the WebAssemblyTargetLowering class.
///
//===----------------------------------------------------------------------===//

#include "WebAssemblyISelLowering.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

#define DEBUG_TYPE "wasm-lower"

WebAssemblyTargetLowering::WebAssemblyTargetLowering(
    const TargetMachine &TM, const WebAssemblySubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;

  // Booleans always contain 0 or 1.
  setBooleanContents(ZeroOrOneBooleanContent);
  // Except in SIMD vectors
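  // (SIMD comparisons produce an all-ones mask in each true lane, e.g.
  // i32x4.eq leaves 0xFFFFFFFF in every lane where the operands match,
  // hence the zero-or-negative-one convention.)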
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // WebAssembly does not produce floating-point exceptions on normal floating
  // point operations.
  setHasFloatingPointExceptions(false);
  // We don't know the microarchitecture here, so just reduce register pressure.
  setSchedulingPreference(Sched::RegPressure);
  // Tell ISel that we have a stack pointer.
  setStackPointerRegisterToSaveRestore(
      Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
  // Set up the register classes.
  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
  if (Subtarget->hasSIMD128()) {
    addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
  }
  if (Subtarget->hasUnimplementedSIMD128()) {
    addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
  }
  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget->getRegisterInfo());

  setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
  setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
  setOperationAction(ISD::JumpTable, MVTPtr, Custom);
  setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
  setOperationAction(ISD::BRIND, MVT::Other, Custom);

  // Take the default expansion for va_arg, va_copy, and va_end. There is no
  // default action for va_start, so we handle it with custom lowering.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
    // Don't expand the floating-point types to constant pools.
    setOperationAction(ISD::ConstantFP, T, Legal);
    // Expand floating-point comparisons.
    for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
                    ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
      setCondCodeAction(CC, T, Expand);
    // Expand floating-point library function operators.
    for (auto Op :
         {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
      setOperationAction(Op, T, Expand);
    // Mark the floating-point library function operators that are supported
    // natively and would otherwise default to expand.
    for (auto Op :
         {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
      setOperationAction(Op, T, Legal);
    // Support minimum and maximum, which otherwise default to expand.
    setOperationAction(ISD::FMINIMUM, T, Legal);
    setOperationAction(ISD::FMAXIMUM, T, Legal);
    // WebAssembly currently has no builtin f16 support.
    setOperationAction(ISD::FP16_TO_FP, T, Expand);
    setOperationAction(ISD::FP_TO_FP16, T, Expand);
    setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
    setTruncStoreAction(T, MVT::f16, Expand);
  }

  // Expand unavailable integer operations.
  for (auto Op :
       {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
        ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
        ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
    for (auto T : {MVT::i32, MVT::i64})
      setOperationAction(Op, T, Expand);
    if (Subtarget->hasSIMD128())
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Expand);
    if (Subtarget->hasUnimplementedSIMD128())
      setOperationAction(Op, MVT::v2i64, Expand);
  }

  // SIMD-specific configuration
  if (Subtarget->hasSIMD128()) {
    // Support saturating add for i8x16 and i16x8
    for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
      for (auto T : {MVT::v16i8, MVT::v8i16})
        setOperationAction(Op, T, Legal);

    // Custom lower BUILD_VECTORs to minimize number of replace_lanes
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
      setOperationAction(ISD::BUILD_VECTOR, T, Custom);
    if (Subtarget->hasUnimplementedSIMD128())
      for (auto T : {MVT::v2i64, MVT::v2f64})
        setOperationAction(ISD::BUILD_VECTOR, T, Custom);

    // We have custom shuffle lowering to expose the shuffle mask
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
      setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
    if (Subtarget->hasUnimplementedSIMD128())
      for (auto T : {MVT::v2i64, MVT::v2f64})
        setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);

    // Custom lowering since wasm shifts must have a scalar shift amount
    for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL}) {
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Custom);
      if (Subtarget->hasUnimplementedSIMD128())
        setOperationAction(Op, MVT::v2i64, Custom);
    }

    // Custom lower lane accesses to expand out variable indices
    for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT}) {
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
        setOperationAction(Op, T, Custom);
      if (Subtarget->hasUnimplementedSIMD128())
        for (auto T : {MVT::v2i64, MVT::v2f64})
          setOperationAction(Op, T, Custom);
    }

    // There is no i64x2.mul instruction
    setOperationAction(ISD::MUL, MVT::v2i64, Expand);

    // There are no vector select instructions
    for (auto Op : {ISD::VSELECT, ISD::SELECT_CC, ISD::SELECT}) {
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
        setOperationAction(Op, T, Expand);
      if (Subtarget->hasUnimplementedSIMD128())
        for (auto T : {MVT::v2i64, MVT::v2f64})
          setOperationAction(Op, T, Expand);
    }

    // Expand additional SIMD ops that V8 hasn't implemented yet
    if (!Subtarget->hasUnimplementedSIMD128()) {
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
    }
  }

  // As a special case, these operators use the type to mean the type to
  // sign-extend from.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget->hasSignExt()) {
    // Sign extends are legal only when extending a vector extract
    auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
    for (auto T : {MVT::i8, MVT::i16, MVT::i32})
      setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
  }
  for (auto T : MVT::integer_vector_valuetypes())
    setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);

  // Dynamic stack allocation: use the default expansion.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);

  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
  setOperationAction(ISD::CopyToReg, MVT::Other, Custom);

  // Expand these forms; we pattern-match the forms that we can handle in isel.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
    for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
      setOperationAction(Op, T, Expand);

  // We have custom switch handling.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // WebAssembly doesn't have:
  //  - Floating-point extending loads.
  //  - Floating-point truncating stores.
  //  - i1 extending loads.
  //  - extending/truncating SIMD loads/stores
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  for (auto T : MVT::integer_valuetypes())
    for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
      setLoadExtAction(Ext, T, MVT::i1, Promote);
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
                   MVT::v2f64}) {
      for (auto MemT : MVT::vector_valuetypes()) {
        if (MVT(T) != MemT) {
          setTruncStoreAction(T, MemT, Expand);
          for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
            setLoadExtAction(Ext, T, MemT, Expand);
        }
      }
    }
  }

  // Don't do anything clever with build_pairs
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Trap lowers to wasm unreachable
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Exception handling intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  setMaxAtomicSizeInBitsSupported(64);

  if (Subtarget->hasBulkMemory()) {
    // Using memory.copy is always better than using multiple loads and stores
    MaxStoresPerMemcpy = 1;
    MaxStoresPerMemcpyOptSize = 1;
    MaxStoresPerMemmove = 1;
    MaxStoresPerMemmoveOptSize = 1;
  }
}

TargetLowering::AtomicExpansionKind
WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // We have wasm instructions for these
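  // (they map directly to the atomic.rmw.* add/sub/and/or/xor/xchg forms;
  // anything else, such as nand or min/max, is instead expanded to a
  // cmpxchg loop via AtomicExpansionKind::CmpXChg below).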
  switch (AI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::Xchg:
    return AtomicExpansionKind::None;
  default:
    break;
  }
  return AtomicExpansionKind::CmpXChg;
}

FastISel *WebAssemblyTargetLowering::createFastISel(
    FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
  return WebAssembly::createFastISel(FuncInfo, LibInfo);
}

bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode * /*GA*/) const {
  // All offsets can be folded.
  return true;
}

MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
                                                      EVT VT) const {
  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
  if (BitWidth > 1 && BitWidth < 8)
    BitWidth = 8;

  if (BitWidth > 64) {
    // The shift will be lowered to a libcall, and compiler-rt libcalls expect
    // the count to be an i32.
    BitWidth = 32;
    assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
           "32-bit shift counts ought to be enough for anyone");
  }

  MVT Result = MVT::getIntegerVT(BitWidth);
  assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
         "Unable to represent scalar shift amount type");
  return Result;
}

// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
// undefined result on invalid/overflow, to the WebAssembly opcode, which
// traps on invalid/overflow.
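//
// The emitted control flow is roughly (signed f32 -> i32 case shown):
//
//   if (!(fabs(x) < 0x1p31f))       // NaN or out of range
//     result = INT32_MIN;           // substitute value
//   else
//     result = i32.trunc_s/f32(x);  // the trapping conversion is now safe
//
// For the unsigned cases the range-check threshold doubles and an extra
// x >= 0.0 comparison is added, since fabs() can no longer be used to fold
// the negative range into the same check.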
static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
                                       MachineBasicBlock *BB,
                                       const TargetInstrInfo &TII,
                                       bool IsUnsigned, bool Int64,
                                       bool Float64, unsigned LoweredOpcode) {
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  unsigned OutReg = MI.getOperand(0).getReg();
  unsigned InReg = MI.getOperand(1).getReg();

  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
  unsigned Eqz = WebAssembly::EQZ_I32;
  unsigned And = WebAssembly::AND_I32;
  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
  int64_t Substitute = IsUnsigned ? 0 : Limit;
  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
  auto &Context = BB->getParent()->getFunction().getContext();
  Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);

  const BasicBlock *LLVMBB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);

  MachineFunction::iterator It = ++BB->getIterator();
  F->insert(It, FalseMBB);
  F->insert(It, TrueMBB);
  F->insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(TrueMBB);
  BB->addSuccessor(FalseMBB);
  TrueMBB->addSuccessor(DoneMBB);
  FalseMBB->addSuccessor(DoneMBB);

  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));

  MI.eraseFromParent();
  // For signed numbers, we can do a single comparison to determine whether
  // fabs(x) is within range.
  if (IsUnsigned) {
    Tmp0 = InReg;
  } else {
    BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
  }
  BuildMI(BB, DL, TII.get(FConst), Tmp1)
      .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
  BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);

  // For unsigned numbers, we have to do a separate comparison with zero.
  if (IsUnsigned) {
    Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
    unsigned SecondCmpReg =
        MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    unsigned AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    BuildMI(BB, DL, TII.get(FConst), Tmp1)
        .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
    BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
    BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
    CmpReg = AndReg;
  }

  BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);

  // Create the CFG diamond to select between doing the conversion or using
  // the substitute value.
  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
  BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
  BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
      .addReg(FalseReg)
      .addMBB(FalseMBB)
      .addReg(TrueReg)
      .addMBB(TrueMBB);

  return DoneMBB;
}

MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case WebAssembly::FP_TO_SINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, false, false,
                        WebAssembly::I32_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, false, false,
                        WebAssembly::I32_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, true, false,
                        WebAssembly::I64_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, true, false,
                        WebAssembly::I64_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, false, true,
                        WebAssembly::I32_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, false, true,
                        WebAssembly::I32_TRUNC_U_F64);
  case WebAssembly::FP_TO_SINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, true, true,
                        WebAssembly::I64_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, true, true,
                        WebAssembly::I64_TRUNC_U_F64);
  }
}

const char *
WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
  case WebAssemblyISD::FIRST_NUMBER:
    break;
#define HANDLE_NODETYPE(NODE)                                                  \
  case WebAssemblyISD::NODE:                                                   \
    return "WebAssemblyISD::" #NODE;
#include "WebAssemblyISD.def"
#undef HANDLE_NODETYPE
  }
  return nullptr;
}

std::pair<unsigned, const TargetRegisterClass *>
WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // WebAssembly register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      assert(VT != MVT::iPTR && "Pointer MVT not expected here");
      if (Subtarget->hasSIMD128() && VT.isVector()) {
        if (VT.getSizeInBits() == 128)
          return std::make_pair(0U, &WebAssembly::V128RegClass);
      }
      if (VT.isInteger() && !VT.isVector()) {
        if (VT.getSizeInBits() <= 32)
          return std::make_pair(0U, &WebAssembly::I32RegClass);
        if (VT.getSizeInBits() <= 64)
          return std::make_pair(0U, &WebAssembly::I64RegClass);
      }
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
  // Assume ctz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
  // Assume clz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                      const AddrMode &AM,
                                                      Type *Ty, unsigned AS,
                                                      Instruction *I) const {
  // WebAssembly offsets are added as unsigned without wrapping. The
  // isLegalAddressingMode gives us no way to determine if wrapping could be
  // happening, so we approximate this by accepting only non-negative offsets.
  if (AM.BaseOffs < 0)
    return false;

  // WebAssembly has no scale register operands.
  if (AM.Scale != 0)
    return false;

  // Everything else is legal.
  return true;
}

bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/, bool *Fast) const {
  // WebAssembly supports unaligned accesses, though it should be declared
  // with the p2align attribute on loads and stores which do so, and there
  // may be a performance impact. We tell LLVM they're "fast" because
  // for the kinds of things that LLVM uses this for (merging adjacent stores
  // of constants, etc.), WebAssembly implementations will either want the
  // unaligned access or they'll split anyway.
  if (Fast)
    *Fast = true;
  return true;
}

bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
                                              AttributeList Attr) const {
  // The current thinking is that wasm engines will perform this optimization,
  // so we can save on code size.
  return true;
}

EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
                                                  LLVMContext &C,
                                                  EVT VT) const {
  if (VT.isVector())
    return VT.changeVectorElementTypeToInteger();

  return TargetLowering::getSetCCResultType(DL, C, VT);
}

bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                   const CallInst &I,
                                                   MachineFunction &MF,
                                                   unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::wasm_atomic_notify:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = 4;
    // The atomic.notify instruction does not really load from the memory
    // specified by this argument, but a MachineMemOperand must be either a
    // load or a store, so we mark this as a load.
    // FIXME Volatile isn't really correct, but currently all LLVM atomic
    // instructions are treated as volatiles in the backend, so we should be
    // consistent. The same applies for wasm_atomic_wait intrinsics too.
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_atomic_wait_i32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = 4;
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_atomic_wait_i64:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = 8;
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  default:
    return false;
  }
}

//===----------------------------------------------------------------------===//
// WebAssembly Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}

// Test whether the given calling convention is supported.
static bool callingConvSupported(CallingConv::ID CallConv) {
  // We currently support the language-independent target-independent
  // conventions. We don't yet have a way to annotate calls with properties like
  // "cold", and we don't have any call-clobbered registers, so these are mostly
  // all handled the same.
  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
         CallConv == CallingConv::Cold ||
         CallConv == CallingConv::PreserveMost ||
         CallConv == CallingConv::PreserveAll ||
         CallConv == CallingConv::CXX_FAST_TLS;
}

SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!callingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  // WebAssembly doesn't currently support explicit tail calls. If they are
  // required, fail. Otherwise, just disable them.
  if ((CallConv == CallingConv::Fast && CLI.IsTailCall &&
       MF.getTarget().Options.GuaranteedTailCallOpt) ||
      (CLI.CS && CLI.CS.isMustTailCall()))
    fail(DL, DAG, "WebAssembly doesn't support tail call yet");
  CLI.IsTailCall = false;

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  if (Ins.size() > 1)
    fail(DL, DAG, "WebAssembly doesn't support more than 1 returned value yet");

  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  unsigned NumFixedArgs = 0;
  for (unsigned I = 0; I < Outs.size(); ++I) {
    const ISD::OutputArg &Out = Outs[I];
    SDValue &OutVal = OutVals[I];
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
    // Count the number of fixed args *after* legalization.
    NumFixedArgs += Out.IsFixed;
  }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (SDValue Arg :
         make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      unsigned Offset = CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty),
                                             Layout.getABITypeAlignment(Ty));
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }
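  // For example, for a variadic call like printf(fmt, (i32)x, (f64)y) the two
  // non-fixed arguments would typically be assigned offsets 0 and 8 in the
  // buffer (assuming the usual wasm32 data layout, where f64 has 8-byte
  // ABI alignment).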

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg :
         make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
                       MachinePointerInfo::getFixedStack(MF, FI, Offset), 0));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }
  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res =
      DAG.getNode(Ins.empty() ? WebAssemblyISD::CALL0 : WebAssemblyISD::CALL1,
                  DL, InTyList, Ops);
  if (Ins.empty()) {
    Chain = Res;
  } else {
    InVals.push_back(Res);
    Chain = Res.getValue(1);
  }

  return Chain;
}

bool WebAssemblyTargetLowering::CanLowerReturn(
    CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext & /*Context*/) const {
  // WebAssembly can't currently handle returning tuples.
  return Outs.size() <= 1;
}

SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {
  assert(Outs.size() <= 1 && "WebAssembly can only return up to one value");
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  SmallVector<SDValue, 4> RetOps(1, Chain);
  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  // Record the number and types of the return values.
  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
    assert(Out.IsFixed && "non-fixed return value is not valid");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}

SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
  // of the incoming values before they're represented by virtual registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  for (const ISD::InputArg &In : Ins) {
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getOrigAlign() because all our arguments are passed in
    // registers.
    InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                                           DAG.getTargetConstant(InVals.size(),
                                                                 DL, MVT::i32))
                             : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
  if (IsVarArg) {
    MVT PtrVT = getPointerTy(MF.getDataLayout());
    unsigned VarargVreg =
        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
    MFI->setVarargBufferVreg(VarargVreg);
    Chain = DAG.getCopyToReg(
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
    MFI->addParam(PtrVT);
  }

  // Record the number and types of arguments and results.
  SmallVector<MVT, 4> Params;
  SmallVector<MVT, 4> Results;
  computeSignatureVTs(MF.getFunction().getFunctionType(), MF.getFunction(),
                      DAG.getTarget(), Params, Results);
  for (MVT VT : Results)
    MFI->addResult(VT);
  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
  // the param logic here with ComputeSignatureVTs
  assert(MFI->getParams().size() == Params.size() &&
         std::equal(MFI->getParams().begin(), MFI->getParams().end(),
                    Params.begin()));

  return Chain;
}

//===----------------------------------------------------------------------===//
//  Custom lowering hooks.
//===----------------------------------------------------------------------===//

SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unimplemented operation lowering");
    return SDValue();
  case ISD::FrameIndex:
    return LowerFrameIndex(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::BlockAddress:
  case ISD::BRIND:
    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
    return SDValue();
  case ISD::RETURNADDR: // Probably nothing meaningful can be returned here.
    fail(DL, DAG, "WebAssembly hasn't implemented __builtin_return_address");
    return SDValue();
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::CopyToReg:
    return LowerCopyToReg(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
  case ISD::INSERT_VECTOR_ELT:
    return LowerAccessVectorElement(Op, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
    return LowerIntrinsic(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BUILD_VECTOR:
    return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return LowerShift(Op, DAG);
  }
}

SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(2);
  if (isa<FrameIndexSDNode>(Src.getNode())) {
    // CopyToReg nodes don't support FrameIndex operands. Other targets select
    // the FI to some LEA-like instruction, but since we don't have that, we
    // need to insert some kind of instruction that can take an FI operand and
    // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
    // local.copy between Op and its FI operand.
    SDValue Chain = Op.getOperand(0);
    SDLoc DL(Op);
    unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
    EVT VT = Src.getValueType();
    SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
                                                   : WebAssembly::COPY_I64,
                                    DL, VT, Src),
                 0);
    return Op.getNode()->getNumValues() == 1
               ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
               : DAG.getCopyToReg(Chain, DL, Reg, Copy,
                                  Op.getNumOperands() == 4 ? Op.getOperand(3)
                                                           : SDValue());
  }
  return SDValue();
}

SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
                                                   SelectionDAG &DAG) const {
  int FI = cast<FrameIndexSDNode>(Op)->getIndex();
  return DAG.getTargetFrameIndex(FI, Op.getValueType());
}

SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // Non-zero depths are not supported by WebAssembly currently. Use the
  // legalizer's default expansion, which is to return 0 (what this function is
  // documented to do).
  if (Op.getConstantOperandVal(0) > 0)
    return SDValue();

  DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  unsigned FP =
      Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
}

SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *GA = cast<GlobalAddressSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(GA->getTargetFlags() == 0 &&
         "Unexpected target flags on generic GlobalAddressSDNode");
  if (GA->getAddressSpace() != 0)
    fail(DL, DAG, "WebAssembly only expects the 0 address space");
  return DAG.getNode(
      WebAssemblyISD::Wrapper, DL, VT,
      DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset()));
}

SDValue
WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *ES = cast<ExternalSymbolSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(ES->getTargetFlags() == 0 &&
         "Unexpected target flags on generic ExternalSymbolSDNode");
  // Set the TargetFlags to 0x1 which indicates that this is a "function"
  // symbol rather than a data symbol. We do this unconditionally even though
  // we don't know anything about the symbol other than its name, because all
  // external symbols used in target-independent SelectionDAG code are for
  // functions.
  return DAG.getNode(
      WebAssemblyISD::Wrapper, DL, VT,
      DAG.getTargetExternalSymbol(ES->getSymbol(), VT,
                                  WebAssemblyII::MO_SYMBOL_FUNCTION));
}

SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // There's no need for a Wrapper node because we always incorporate a jump
  // table operand into a BR_TABLE instruction, rather than ever
  // materializing it in a register.
  const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
                                JT->getTargetFlags());
}

SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
  SDValue Index = Op.getOperand(2);
  assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Index);

  MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
  const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;

  // Add an operand for each case.
  for (auto MBB : MBBs)
    Ops.push_back(DAG.getBasicBlock(MBB));

  // TODO: For now, we just pick something arbitrary for a default case. We
  // really want to sniff out the guard and put in the real default case (and
  // delete the guard).
  Ops.push_back(DAG.getBasicBlock(MBBs[0]));

  return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
}

SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());

  auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
                                    MFI->getVarargBufferVreg(), PtrVT);
  return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
                      MachinePointerInfo(SV), 0);
}

SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned IntNo;
  switch (Op.getOpcode()) {
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN:
    IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    break;
  case ISD::INTRINSIC_WO_CHAIN:
    IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    break;
  default:
    llvm_unreachable("Invalid intrinsic");
  }
  SDLoc DL(Op);

  switch (IntNo) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.

  case Intrinsic::wasm_lsda: {
    EVT VT = Op.getValueType();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    auto &Context = MF.getMMI().getContext();
    MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
                                            Twine(MF.getFunctionNumber()));
    return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                       DAG.getMCSymbol(S, PtrVT));
  }

  case Intrinsic::wasm_throw: {
    // We only support C++ exceptions for now
    int Tag = cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
    if (Tag != CPP_EXCEPTION)
      llvm_unreachable("Invalid tag!");
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    const char *SymName = MF.createExternalSymbolName("__cpp_exception");
    SDValue SymNode =
        DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
                    DAG.getTargetExternalSymbol(
                        SymName, PtrVT, WebAssemblyII::MO_SYMBOL_EVENT));
    return DAG.getNode(WebAssemblyISD::THROW, DL,
                       MVT::Other, // outchain type
                       {
                           Op.getOperand(0), // inchain
                           SymNode,          // exception symbol
                           Op.getOperand(3)  // thrown value
                       });
  }
  }
}

SDValue
WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // If sign extension operations are disabled, allow sext_inreg only if operand
  // is a vector extract. SIMD does not depend on sign extension operations, but
  // allowing sext_inreg in this context lets us have simple patterns to select
  // extract_lane_s instructions. Expanding sext_inreg everywhere would be
  // simpler in this file, but would necessitate large and brittle patterns to
  // undo the expansion and select extract_lane_s instructions.
  assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
  if (Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT)
    return Op;
  // Otherwise expand
  return SDValue();
}

SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const EVT VecT = Op.getValueType();
  const EVT LaneT = Op.getOperand(0).getValueType();
  const size_t Lanes = Op.getNumOperands();
  auto IsConstant = [](const SDValue &V) {
    return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
  };

  // Find the most common operand, which is approximately the best to splat
  using Entry = std::pair<SDValue, size_t>;
  SmallVector<Entry, 16> ValueCounts;
  size_t NumConst = 0, NumDynamic = 0;
  for (const SDValue &Lane : Op->op_values()) {
    if (Lane.isUndef()) {
      continue;
    } else if (IsConstant(Lane)) {
      NumConst++;
    } else {
      NumDynamic++;
    }
    auto CountIt = std::find_if(ValueCounts.begin(), ValueCounts.end(),
                                [&Lane](Entry A) { return A.first == Lane; });
    if (CountIt == ValueCounts.end()) {
      ValueCounts.emplace_back(Lane, 1);
    } else {
      CountIt->second++;
    }
  }
  auto CommonIt =
      std::max_element(ValueCounts.begin(), ValueCounts.end(),
                       [](Entry A, Entry B) { return A.second < B.second; });
  assert(CommonIt != ValueCounts.end() && "Unexpected all-undef build_vector");
  SDValue SplatValue = CommonIt->first;
  size_t NumCommon = CommonIt->second;

  // If v128.const is available, consider using it instead of a splat
  if (Subtarget->hasUnimplementedSIMD128()) {
    // {i32,i64,f32,f64}.const opcode, and value
    const size_t ConstBytes = 1 + std::max(size_t(4), 16 / Lanes);
    // SIMD prefix and opcode
    const size_t SplatBytes = 2;
    const size_t SplatConstBytes = SplatBytes + ConstBytes;
    // SIMD prefix, opcode, and lane index
    const size_t ReplaceBytes = 3;
    const size_t ReplaceConstBytes = ReplaceBytes + ConstBytes;
    // SIMD prefix, v128.const opcode, and 128-bit value
    const size_t VecConstBytes = 18;
    // Initial v128.const and a replace_lane for each non-const operand
    const size_t ConstInitBytes = VecConstBytes + NumDynamic * ReplaceBytes;
    // Initial splat and all necessary replace_lanes
    const size_t SplatInitBytes =
        IsConstant(SplatValue)
            // Initial constant splat
            ? (SplatConstBytes +
               // Constant replace_lanes
               (NumConst - NumCommon) * ReplaceConstBytes +
               // Dynamic replace_lanes
               (NumDynamic * ReplaceBytes))
            // Initial dynamic splat
            : (SplatBytes +
               // Constant replace_lanes
               (NumConst * ReplaceConstBytes) +
               // Dynamic replace_lanes
               (NumDynamic - NumCommon) * ReplaceBytes);
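    // Worked example (assuming the chosen splat value is one of the
    // constants): a v4i32 with three distinct constant lanes and one dynamic
    // lane has ConstBytes = 1 + max(4, 16/4) = 5, so ConstInitBytes =
    // 18 + 1*3 = 21 while SplatInitBytes = (2+5) + 2*8 + 1*3 = 26, and the
    // v128.const lowering below wins.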
    if (ConstInitBytes < SplatInitBytes) {
      // Create build_vector that will lower to initial v128.const
      SmallVector<SDValue, 16> ConstLanes;
      for (const SDValue &Lane : Op->op_values()) {
        if (IsConstant(Lane)) {
          ConstLanes.push_back(Lane);
        } else if (LaneT.isFloatingPoint()) {
          ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
        } else {
          ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
        }
      }
      SDValue Result = DAG.getBuildVector(VecT, DL, ConstLanes);
      // Add replace_lane instructions for non-const lanes
      for (size_t I = 0; I < Lanes; ++I) {
        const SDValue &Lane = Op->getOperand(I);
        if (!Lane.isUndef() && !IsConstant(Lane))
          Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
                               DAG.getConstant(I, DL, MVT::i32));
      }
      return Result;
    }
  }
  // Use a splat for the initial vector
  SDValue Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
  // Add replace_lane instructions for other values
  for (size_t I = 0; I < Lanes; ++I) {
    const SDValue &Lane = Op->getOperand(I);
    if (Lane != SplatValue)
      Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
                           DAG.getConstant(I, DL, MVT::i32));
  }
  return Result;
}

SDValue
WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
  MVT VecType = Op.getOperand(0).getSimpleValueType();
  assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
  size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;

  // Space for two vector args and sixteen mask indices
  SDValue Ops[18];
  size_t OpIdx = 0;
  Ops[OpIdx++] = Op.getOperand(0);
  Ops[OpIdx++] = Op.getOperand(1);

  // Expand mask indices to byte indices and materialize them as operands
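  // (e.g. a v4i32 mask entry of 0 expands to byte indices 0..3, while an
  // entry of 4, which selects lane 0 of the second operand, expands to the
  // byte indices 16..19)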
  for (int M : Mask) {
    for (size_t J = 0; J < LaneBytes; ++J) {
      // Lower undefs (represented by -1 in mask) to zero
      uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
      Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
    }
  }

  return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
}

SDValue
WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
                                                    SelectionDAG &DAG) const {
  // Allow constant lane indices, expand variable lane indices
  SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
  if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
    return Op;
  else
    // Perform default expansion
    return SDValue();
}

static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
  EVT LaneT = Op.getSimpleValueType().getVectorElementType();
  // 32-bit and 64-bit unrolled shifts will have proper semantics
  if (LaneT.bitsGE(MVT::i32))
    return DAG.UnrollVectorOp(Op.getNode());
  // Otherwise mask the shift value to get proper semantics from 32-bit shift
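  // (e.g. for i8 lanes the mask is 7, so a shift amount of 9 becomes 1,
  // matching the modulo-lane-width behavior of the wasm SIMD shifts)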
  SDLoc DL(Op);
  SDValue ShiftVal = Op.getOperand(1);
  uint64_t MaskVal = LaneT.getSizeInBits() - 1;
  SDValue MaskedShiftVal = DAG.getNode(
      ISD::AND,                    // mask opcode
      DL, ShiftVal.getValueType(), // masked value type
      ShiftVal,                    // original shift value operand
      DAG.getConstant(MaskVal, DL, ShiftVal.getValueType()) // mask operand
  );

  return DAG.UnrollVectorOp(
      DAG.getNode(Op.getOpcode(),        // original shift opcode
                  DL, Op.getValueType(), // original return type
                  Op.getOperand(0),      // original vector operand,
                  MaskedShiftVal         // new masked shift value operand
                  )
          .getNode());
}

SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);

  // Only manually lower vector shifts
  assert(Op.getSimpleValueType().isVector());

  // Expand all vector shifts until V8 fixes its implementation
  // TODO: remove this once V8 is fixed
  if (!Subtarget->hasUnimplementedSIMD128())
    return unrollVectorShift(Op, DAG);

  // Unroll non-splat vector shifts
  BuildVectorSDNode *ShiftVec;
  SDValue SplatVal;
  if (!(ShiftVec = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode())) ||
      !(SplatVal = ShiftVec->getSplatValue()))
    return unrollVectorShift(Op, DAG);

  // All splats except i64x2 const splats are handled by patterns
  auto *SplatConst = dyn_cast<ConstantSDNode>(SplatVal);
  if (!SplatConst || Op.getSimpleValueType() != MVT::v2i64)
    return Op;

  // i64x2 const splats are custom lowered to avoid unnecessary wraps
  unsigned Opcode;
  switch (Op.getOpcode()) {
  case ISD::SHL:
    Opcode = WebAssemblyISD::VEC_SHL;
    break;
  case ISD::SRA:
    Opcode = WebAssemblyISD::VEC_SHR_S;
    break;
  case ISD::SRL:
    Opcode = WebAssemblyISD::VEC_SHR_U;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }
  APInt Shift = SplatConst->getAPIntValue().zextOrTrunc(32);
  return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0),
                     DAG.getConstant(Shift, DL, MVT::i32));
}

//===----------------------------------------------------------------------===//
//                          WebAssembly Optimization Hooks
//===----------------------------------------------------------------------===//