1 //=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements the WebAssemblyTargetLowering class.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "WebAssemblyISelLowering.h"
15 #include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
16 #include "WebAssemblyMachineFunctionInfo.h"
17 #include "WebAssemblySubtarget.h"
18 #include "WebAssemblyTargetMachine.h"
19 #include "WebAssemblyUtilities.h"
20 #include "llvm/CodeGen/Analysis.h"
21 #include "llvm/CodeGen/CallingConvLower.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineJumpTableInfo.h"
24 #include "llvm/CodeGen/MachineModuleInfo.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/SelectionDAG.h"
27 #include "llvm/CodeGen/WasmEHFuncInfo.h"
28 #include "llvm/IR/DiagnosticInfo.h"
29 #include "llvm/IR/DiagnosticPrinter.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/Intrinsics.h"
32 #include "llvm/IR/IntrinsicsWebAssembly.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/MathExtras.h"
36 #include "llvm/Support/raw_ostream.h"
37 #include "llvm/Target/TargetOptions.h"
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "wasm-lower"
41 
42 WebAssemblyTargetLowering::WebAssemblyTargetLowering(
43     const TargetMachine &TM, const WebAssemblySubtarget &STI)
44     : TargetLowering(TM), Subtarget(&STI) {
45   auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
46 
47   // Booleans always contain 0 or 1.
48   setBooleanContents(ZeroOrOneBooleanContent);
49   // Except in SIMD vectors
50   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
51   // We don't know the microarchitecture here, so just reduce register pressure.
52   setSchedulingPreference(Sched::RegPressure);
53   // Tell ISel that we have a stack pointer.
54   setStackPointerRegisterToSaveRestore(
55       Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
56   // Set up the register classes.
57   addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
58   addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
59   addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
60   addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
61   if (Subtarget->hasSIMD128()) {
62     addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
63     addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
64     addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
65     addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
66     addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
67     addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
68   }
69   // Compute derived properties from the register classes.
70   computeRegisterProperties(Subtarget->getRegisterInfo());
71 
72   setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
73   setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom);
74   setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
75   setOperationAction(ISD::JumpTable, MVTPtr, Custom);
76   setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
77   setOperationAction(ISD::BRIND, MVT::Other, Custom);
78 
79   // Take the default expansion for va_arg, va_copy, and va_end. There is no
80   // default action for va_start, so we do that custom.
81   setOperationAction(ISD::VASTART, MVT::Other, Custom);
82   setOperationAction(ISD::VAARG, MVT::Other, Expand);
83   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
84   setOperationAction(ISD::VAEND, MVT::Other, Expand);
85 
86   for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
87     // Don't expand the floating-point types to constant pools.
88     setOperationAction(ISD::ConstantFP, T, Legal);
89     // Expand floating-point comparisons.
90     for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
91                     ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
92       setCondCodeAction(CC, T, Expand);
93     // Expand floating-point library function operators.
94     for (auto Op :
95          {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
96       setOperationAction(Op, T, Expand);
97     // Note supported floating-point library function operators that otherwise
98     // default to expand.
99     for (auto Op :
100          {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
101       setOperationAction(Op, T, Legal);
102     // Support minimum and maximum, which otherwise default to expand.
103     setOperationAction(ISD::FMINIMUM, T, Legal);
104     setOperationAction(ISD::FMAXIMUM, T, Legal);
105     // WebAssembly currently has no builtin f16 support.
106     setOperationAction(ISD::FP16_TO_FP, T, Expand);
107     setOperationAction(ISD::FP_TO_FP16, T, Expand);
108     setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
109     setTruncStoreAction(T, MVT::f16, Expand);
110   }
111 
112   // Expand unavailable integer operations.
113   for (auto Op :
114        {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
115         ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
116         ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
117     for (auto T : {MVT::i32, MVT::i64})
118       setOperationAction(Op, T, Expand);
119     if (Subtarget->hasSIMD128())
120       for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
121         setOperationAction(Op, T, Expand);
122   }
123 
124   if (Subtarget->hasNontrappingFPToInt())
125     for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
126       for (auto T : {MVT::i32, MVT::i64})
127         setOperationAction(Op, T, Custom);
128 
129   // SIMD-specific configuration
130   if (Subtarget->hasSIMD128()) {
131     // Hoist bitcasts out of shuffles
132     setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
133 
134     // Combine extends of extract_subvectors into widening ops
135     setTargetDAGCombine(ISD::SIGN_EXTEND);
136     setTargetDAGCombine(ISD::ZERO_EXTEND);
137 
138     // Combine int_to_fp of extract_vectors and vice versa into conversions ops
139     setTargetDAGCombine(ISD::SINT_TO_FP);
140     setTargetDAGCombine(ISD::UINT_TO_FP);
141     setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
142 
143     // Combine concat of {s,u}int_to_fp_sat to i32x4.trunc_sat_f64x2_zero_{s,u}
144     setTargetDAGCombine(ISD::CONCAT_VECTORS);
145 
146     // Support saturating add for i8x16 and i16x8
147     for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
148       for (auto T : {MVT::v16i8, MVT::v8i16})
149         setOperationAction(Op, T, Legal);
150 
151     // Support integer abs
152     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
153       setOperationAction(ISD::ABS, T, Legal);
154 
155     // Custom lower BUILD_VECTORs to minimize number of replace_lanes
156     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
157                    MVT::v2f64})
158       setOperationAction(ISD::BUILD_VECTOR, T, Custom);
159 
160     // We have custom shuffle lowering to expose the shuffle mask
161     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
162                    MVT::v2f64})
163       setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
164 
165     // Custom lowering since wasm shifts must have a scalar shift amount
166     for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
167       for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
168         setOperationAction(Op, T, Custom);
169 
170     // Custom lower lane accesses to expand out variable indices
171     for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
172       for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
173                      MVT::v2f64})
174         setOperationAction(Op, T, Custom);
175 
176     // There is no i8x16.mul instruction
177     setOperationAction(ISD::MUL, MVT::v16i8, Expand);
178 
179     // There is no vector conditional select instruction
180     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
181                    MVT::v2f64})
182       setOperationAction(ISD::SELECT_CC, T, Expand);
183 
184     // Expand integer operations supported for scalars but not SIMD
185     for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP, ISD::SDIV, ISD::UDIV,
186                     ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
187       for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
188         setOperationAction(Op, T, Expand);
189 
190     // But we do have integer min and max operations
191     for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
192       for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
193         setOperationAction(Op, T, Legal);
194 
195     // Expand float operations supported for scalars but not SIMD
196     for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
197                     ISD::FEXP, ISD::FEXP2, ISD::FRINT})
198       for (auto T : {MVT::v4f32, MVT::v2f64})
199         setOperationAction(Op, T, Expand);
200 
201     // Unsigned comparison operations are unavailable for i64x2 vectors.
202     for (auto CC : {ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE})
203       setCondCodeAction(CC, MVT::v2i64, Custom);
204 
205     // 64x2 conversions are not in the spec
206     for (auto Op :
207          {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
208       for (auto T : {MVT::v2i64, MVT::v2f64})
209         setOperationAction(Op, T, Expand);
210 
211     // But saturating fp_to_int converstions are
212     for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
213       setOperationAction(Op, MVT::v4i32, Custom);
214   }
215 
216   // As a special case, these operators use the type to mean the type to
217   // sign-extend from.
218   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
219   if (!Subtarget->hasSignExt()) {
220     // Sign extends are legal only when extending a vector extract
221     auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
222     for (auto T : {MVT::i8, MVT::i16, MVT::i32})
223       setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
224   }
225   for (auto T : MVT::integer_fixedlen_vector_valuetypes())
226     setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);
227 
228   // Dynamic stack allocation: use the default expansion.
229   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
230   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
231   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);
232 
233   setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
234   setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
235   setOperationAction(ISD::CopyToReg, MVT::Other, Custom);
236 
237   // Expand these forms; we pattern-match the forms that we can handle in isel.
238   for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
239     for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
240       setOperationAction(Op, T, Expand);
241 
242   // We have custom switch handling.
243   setOperationAction(ISD::BR_JT, MVT::Other, Custom);
244 
245   // WebAssembly doesn't have:
246   //  - Floating-point extending loads.
247   //  - Floating-point truncating stores.
248   //  - i1 extending loads.
249   //  - truncating SIMD stores and most extending loads
250   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
251   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
252   for (auto T : MVT::integer_valuetypes())
253     for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
254       setLoadExtAction(Ext, T, MVT::i1, Promote);
255   if (Subtarget->hasSIMD128()) {
256     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
257                    MVT::v2f64}) {
258       for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
259         if (MVT(T) != MemT) {
260           setTruncStoreAction(T, MemT, Expand);
261           for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
262             setLoadExtAction(Ext, T, MemT, Expand);
263         }
264       }
265     }
266     // But some vector extending loads are legal
267     for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
268       setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
269       setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
270       setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
271     }
272     // And some truncating stores are legal as well
273     setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
274     setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
275   }
276 
277   // Don't do anything clever with build_pairs
278   setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
279 
280   // Trap lowers to wasm unreachable
281   setOperationAction(ISD::TRAP, MVT::Other, Legal);
282   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
283 
284   // Exception handling intrinsics
285   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
286   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
287   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
288 
289   setMaxAtomicSizeInBitsSupported(64);
290 
291   // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
292   // consistent with the f64 and f128 names.
293   setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
294   setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
295 
296   // Define the emscripten name for return address helper.
297   // TODO: when implementing other Wasm backends, make this generic or only do
298   // this on emscripten depending on what they end up doing.
299   setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");
300 
301   // Always convert switches to br_tables unless there is only one case, which
302   // is equivalent to a simple branch. This reduces code size for wasm, and we
303   // defer possible jump table optimizations to the VM.
304   setMinimumJumpTableEntries(2);
305 }
306 
307 TargetLowering::AtomicExpansionKind
308 WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
309   // We have wasm instructions for these
310   switch (AI->getOperation()) {
311   case AtomicRMWInst::Add:
312   case AtomicRMWInst::Sub:
313   case AtomicRMWInst::And:
314   case AtomicRMWInst::Or:
315   case AtomicRMWInst::Xor:
316   case AtomicRMWInst::Xchg:
317     return AtomicExpansionKind::None;
318   default:
319     break;
320   }
321   return AtomicExpansionKind::CmpXChg;
322 }
323 
324 FastISel *WebAssemblyTargetLowering::createFastISel(
325     FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
326   return WebAssembly::createFastISel(FuncInfo, LibInfo);
327 }
328 
329 MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
330                                                       EVT VT) const {
331   unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
332   if (BitWidth > 1 && BitWidth < 8)
333     BitWidth = 8;
334 
335   if (BitWidth > 64) {
336     // The shift will be lowered to a libcall, and compiler-rt libcalls expect
337     // the count to be an i32.
338     BitWidth = 32;
339     assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
340            "32-bit shift counts ought to be enough for anyone");
341   }
342 
343   MVT Result = MVT::getIntegerVT(BitWidth);
344   assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
345          "Unable to represent scalar shift amount type");
346   return Result;
347 }
348 
349 // Lower an fp-to-int conversion operator from the LLVM opcode, which has an
350 // undefined result on invalid/overflow, to the WebAssembly opcode, which
351 // traps on invalid/overflow.
352 static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
353                                        MachineBasicBlock *BB,
354                                        const TargetInstrInfo &TII,
355                                        bool IsUnsigned, bool Int64,
356                                        bool Float64, unsigned LoweredOpcode) {
357   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
358 
359   Register OutReg = MI.getOperand(0).getReg();
360   Register InReg = MI.getOperand(1).getReg();
361 
362   unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
363   unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
364   unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
365   unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
366   unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
367   unsigned Eqz = WebAssembly::EQZ_I32;
368   unsigned And = WebAssembly::AND_I32;
369   int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
370   int64_t Substitute = IsUnsigned ? 0 : Limit;
371   double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
372   auto &Context = BB->getParent()->getFunction().getContext();
373   Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
374 
375   const BasicBlock *LLVMBB = BB->getBasicBlock();
376   MachineFunction *F = BB->getParent();
377   MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
378   MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
379   MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
380 
381   MachineFunction::iterator It = ++BB->getIterator();
382   F->insert(It, FalseMBB);
383   F->insert(It, TrueMBB);
384   F->insert(It, DoneMBB);
385 
386   // Transfer the remainder of BB and its successor edges to DoneMBB.
387   DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
388   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
389 
390   BB->addSuccessor(TrueMBB);
391   BB->addSuccessor(FalseMBB);
392   TrueMBB->addSuccessor(DoneMBB);
393   FalseMBB->addSuccessor(DoneMBB);
394 
395   unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
396   Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
397   Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
398   CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
399   EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
400   FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
401   TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
402 
403   MI.eraseFromParent();
404   // For signed numbers, we can do a single comparison to determine whether
405   // fabs(x) is within range.
406   if (IsUnsigned) {
407     Tmp0 = InReg;
408   } else {
409     BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
410   }
411   BuildMI(BB, DL, TII.get(FConst), Tmp1)
412       .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
413   BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
414 
415   // For unsigned numbers, we have to do a separate comparison with zero.
416   if (IsUnsigned) {
417     Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
418     Register SecondCmpReg =
419         MRI.createVirtualRegister(&WebAssembly::I32RegClass);
420     Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
421     BuildMI(BB, DL, TII.get(FConst), Tmp1)
422         .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
423     BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
424     BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
425     CmpReg = AndReg;
426   }
427 
428   BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
429 
430   // Create the CFG diamond to select between doing the conversion or using
431   // the substitute value.
432   BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
433   BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
434   BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
435   BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
436   BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
437       .addReg(FalseReg)
438       .addMBB(FalseMBB)
439       .addReg(TrueReg)
440       .addMBB(TrueMBB);
441 
442   return DoneMBB;
443 }
444 
445 static MachineBasicBlock *
446 LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
447                  const WebAssemblySubtarget *Subtarget,
448                  const TargetInstrInfo &TII) {
449   MachineInstr &CallParams = *CallResults.getPrevNode();
450   assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS);
451   assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||
452          CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS);
453 
454   bool IsIndirect = CallParams.getOperand(0).isReg();
455   bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;
456 
457   unsigned CallOp;
458   if (IsIndirect && IsRetCall) {
459     CallOp = WebAssembly::RET_CALL_INDIRECT;
460   } else if (IsIndirect) {
461     CallOp = WebAssembly::CALL_INDIRECT;
462   } else if (IsRetCall) {
463     CallOp = WebAssembly::RET_CALL;
464   } else {
465     CallOp = WebAssembly::CALL;
466   }
467 
468   MachineFunction &MF = *BB->getParent();
469   const MCInstrDesc &MCID = TII.get(CallOp);
470   MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));
471 
472   // See if we must truncate the function pointer.
473   // CALL_INDIRECT takes an i32, but in wasm64 we represent function pointers
474   // as 64-bit for uniformity with other pointer types.
475   // See also: WebAssemblyFastISel::selectCall
476   if (IsIndirect && MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()) {
477     Register Reg32 =
478         MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
479     auto &FnPtr = CallParams.getOperand(0);
480     BuildMI(*BB, CallResults.getIterator(), DL,
481             TII.get(WebAssembly::I32_WRAP_I64), Reg32)
482         .addReg(FnPtr.getReg());
483     FnPtr.setReg(Reg32);
484   }
485 
486   // Move the function pointer to the end of the arguments for indirect calls
487   if (IsIndirect) {
488     auto FnPtr = CallParams.getOperand(0);
489     CallParams.RemoveOperand(0);
490     CallParams.addOperand(FnPtr);
491   }
492 
493   for (auto Def : CallResults.defs())
494     MIB.add(Def);
495 
496   if (IsIndirect) {
497     // Placeholder for the type index.
498     MIB.addImm(0);
499     // The table into which this call_indirect indexes.
500     MCSymbolWasm *Table =
501         WebAssembly::getOrCreateFunctionTableSymbol(MF.getContext(), Subtarget);
502     if (Subtarget->hasReferenceTypes()) {
503       MIB.addSym(Table);
504     } else {
505       // For the MVP there is at most one table whose number is 0, but we can't
506       // write a table symbol or issue relocations.  Instead we just ensure the
507       // table is live and write a zero.
508       Table->setNoStrip();
509       MIB.addImm(0);
510     }
511   }
512 
513   for (auto Use : CallParams.uses())
514     MIB.add(Use);
515 
516   BB->insert(CallResults.getIterator(), MIB);
517   CallParams.eraseFromParent();
518   CallResults.eraseFromParent();
519 
520   return BB;
521 }
522 
523 MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
524     MachineInstr &MI, MachineBasicBlock *BB) const {
525   const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
526   DebugLoc DL = MI.getDebugLoc();
527 
528   switch (MI.getOpcode()) {
529   default:
530     llvm_unreachable("Unexpected instr type to insert");
531   case WebAssembly::FP_TO_SINT_I32_F32:
532     return LowerFPToInt(MI, DL, BB, TII, false, false, false,
533                         WebAssembly::I32_TRUNC_S_F32);
534   case WebAssembly::FP_TO_UINT_I32_F32:
535     return LowerFPToInt(MI, DL, BB, TII, true, false, false,
536                         WebAssembly::I32_TRUNC_U_F32);
537   case WebAssembly::FP_TO_SINT_I64_F32:
538     return LowerFPToInt(MI, DL, BB, TII, false, true, false,
539                         WebAssembly::I64_TRUNC_S_F32);
540   case WebAssembly::FP_TO_UINT_I64_F32:
541     return LowerFPToInt(MI, DL, BB, TII, true, true, false,
542                         WebAssembly::I64_TRUNC_U_F32);
543   case WebAssembly::FP_TO_SINT_I32_F64:
544     return LowerFPToInt(MI, DL, BB, TII, false, false, true,
545                         WebAssembly::I32_TRUNC_S_F64);
546   case WebAssembly::FP_TO_UINT_I32_F64:
547     return LowerFPToInt(MI, DL, BB, TII, true, false, true,
548                         WebAssembly::I32_TRUNC_U_F64);
549   case WebAssembly::FP_TO_SINT_I64_F64:
550     return LowerFPToInt(MI, DL, BB, TII, false, true, true,
551                         WebAssembly::I64_TRUNC_S_F64);
552   case WebAssembly::FP_TO_UINT_I64_F64:
553     return LowerFPToInt(MI, DL, BB, TII, true, true, true,
554                         WebAssembly::I64_TRUNC_U_F64);
555   case WebAssembly::CALL_RESULTS:
556   case WebAssembly::RET_CALL_RESULTS:
557     return LowerCallResults(MI, DL, BB, Subtarget, TII);
558   }
559 }
560 
561 const char *
562 WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
563   switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
564   case WebAssemblyISD::FIRST_NUMBER:
565   case WebAssemblyISD::FIRST_MEM_OPCODE:
566     break;
567 #define HANDLE_NODETYPE(NODE)                                                  \
568   case WebAssemblyISD::NODE:                                                   \
569     return "WebAssemblyISD::" #NODE;
570 #define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
571 #include "WebAssemblyISD.def"
572 #undef HANDLE_MEM_NODETYPE
573 #undef HANDLE_NODETYPE
574   }
575   return nullptr;
576 }
577 
578 std::pair<unsigned, const TargetRegisterClass *>
579 WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
580     const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
581   // First, see if this is a constraint that directly corresponds to a
582   // WebAssembly register class.
583   if (Constraint.size() == 1) {
584     switch (Constraint[0]) {
585     case 'r':
586       assert(VT != MVT::iPTR && "Pointer MVT not expected here");
587       if (Subtarget->hasSIMD128() && VT.isVector()) {
588         if (VT.getSizeInBits() == 128)
589           return std::make_pair(0U, &WebAssembly::V128RegClass);
590       }
591       if (VT.isInteger() && !VT.isVector()) {
592         if (VT.getSizeInBits() <= 32)
593           return std::make_pair(0U, &WebAssembly::I32RegClass);
594         if (VT.getSizeInBits() <= 64)
595           return std::make_pair(0U, &WebAssembly::I64RegClass);
596       }
597       if (VT.isFloatingPoint() && !VT.isVector()) {
598         switch (VT.getSizeInBits()) {
599         case 32:
600           return std::make_pair(0U, &WebAssembly::F32RegClass);
601         case 64:
602           return std::make_pair(0U, &WebAssembly::F64RegClass);
603         default:
604           break;
605         }
606       }
607       break;
608     default:
609       break;
610     }
611   }
612 
613   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
614 }
615 
616 bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
617   // Assume ctz is a relatively cheap operation.
618   return true;
619 }
620 
621 bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
622   // Assume clz is a relatively cheap operation.
623   return true;
624 }
625 
626 bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
627                                                       const AddrMode &AM,
628                                                       Type *Ty, unsigned AS,
629                                                       Instruction *I) const {
630   // WebAssembly offsets are added as unsigned without wrapping. The
631   // isLegalAddressingMode gives us no way to determine if wrapping could be
632   // happening, so we approximate this by accepting only non-negative offsets.
633   if (AM.BaseOffs < 0)
634     return false;
635 
636   // WebAssembly has no scale register operands.
637   if (AM.Scale != 0)
638     return false;
639 
640   // Everything else is legal.
641   return true;
642 }
643 
644 bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
645     EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
646     MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
647   // WebAssembly supports unaligned accesses, though it should be declared
648   // with the p2align attribute on loads and stores which do so, and there
649   // may be a performance impact. We tell LLVM they're "fast" because
650   // for the kinds of things that LLVM uses this for (merging adjacent stores
651   // of constants, etc.), WebAssembly implementations will either want the
652   // unaligned access or they'll split anyway.
653   if (Fast)
654     *Fast = true;
655   return true;
656 }
657 
658 bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
659                                               AttributeList Attr) const {
660   // The current thinking is that wasm engines will perform this optimization,
661   // so we can save on code size.
662   return true;
663 }
664 
665 bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
666   EVT ExtT = ExtVal.getValueType();
667   EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
668   return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
669          (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
670          (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
671 }
672 
673 EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
674                                                   LLVMContext &C,
675                                                   EVT VT) const {
676   if (VT.isVector())
677     return VT.changeVectorElementTypeToInteger();
678 
679   // So far, all branch instructions in Wasm take an I32 condition.
680   // The default TargetLowering::getSetCCResultType returns the pointer size,
681   // which would be useful to reduce instruction counts when testing
682   // against 64-bit pointers/values if at some point Wasm supports that.
683   return EVT::getIntegerVT(C, 32);
684 }
685 
686 bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
687                                                    const CallInst &I,
688                                                    MachineFunction &MF,
689                                                    unsigned Intrinsic) const {
690   switch (Intrinsic) {
691   case Intrinsic::wasm_memory_atomic_notify:
692     Info.opc = ISD::INTRINSIC_W_CHAIN;
693     Info.memVT = MVT::i32;
694     Info.ptrVal = I.getArgOperand(0);
695     Info.offset = 0;
696     Info.align = Align(4);
697     // atomic.notify instruction does not really load the memory specified with
698     // this argument, but MachineMemOperand should either be load or store, so
699     // we set this to a load.
700     // FIXME Volatile isn't really correct, but currently all LLVM atomic
701     // instructions are treated as volatiles in the backend, so we should be
702     // consistent. The same applies for wasm_atomic_wait intrinsics too.
703     Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
704     return true;
705   case Intrinsic::wasm_memory_atomic_wait32:
706     Info.opc = ISD::INTRINSIC_W_CHAIN;
707     Info.memVT = MVT::i32;
708     Info.ptrVal = I.getArgOperand(0);
709     Info.offset = 0;
710     Info.align = Align(4);
711     Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
712     return true;
713   case Intrinsic::wasm_memory_atomic_wait64:
714     Info.opc = ISD::INTRINSIC_W_CHAIN;
715     Info.memVT = MVT::i64;
716     Info.ptrVal = I.getArgOperand(0);
717     Info.offset = 0;
718     Info.align = Align(8);
719     Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
720     return true;
721   case Intrinsic::wasm_load32_zero:
722   case Intrinsic::wasm_load64_zero:
723     Info.opc = ISD::INTRINSIC_W_CHAIN;
724     Info.memVT = Intrinsic == Intrinsic::wasm_load32_zero ? MVT::i32 : MVT::i64;
725     Info.ptrVal = I.getArgOperand(0);
726     Info.offset = 0;
727     Info.align = Info.memVT == MVT::i32 ? Align(4) : Align(8);
728     Info.flags = MachineMemOperand::MOLoad;
729     return true;
730   case Intrinsic::wasm_load8_lane:
731   case Intrinsic::wasm_load16_lane:
732   case Intrinsic::wasm_load32_lane:
733   case Intrinsic::wasm_load64_lane:
734   case Intrinsic::wasm_store8_lane:
735   case Intrinsic::wasm_store16_lane:
736   case Intrinsic::wasm_store32_lane:
737   case Intrinsic::wasm_store64_lane: {
738     MVT MemVT;
739     Align MemAlign;
740     switch (Intrinsic) {
741     case Intrinsic::wasm_load8_lane:
742     case Intrinsic::wasm_store8_lane:
743       MemVT = MVT::i8;
744       MemAlign = Align(1);
745       break;
746     case Intrinsic::wasm_load16_lane:
747     case Intrinsic::wasm_store16_lane:
748       MemVT = MVT::i16;
749       MemAlign = Align(2);
750       break;
751     case Intrinsic::wasm_load32_lane:
752     case Intrinsic::wasm_store32_lane:
753       MemVT = MVT::i32;
754       MemAlign = Align(4);
755       break;
756     case Intrinsic::wasm_load64_lane:
757     case Intrinsic::wasm_store64_lane:
758       MemVT = MVT::i64;
759       MemAlign = Align(8);
760       break;
761     default:
762       llvm_unreachable("unexpected intrinsic");
763     }
764     if (Intrinsic == Intrinsic::wasm_load8_lane ||
765         Intrinsic == Intrinsic::wasm_load16_lane ||
766         Intrinsic == Intrinsic::wasm_load32_lane ||
767         Intrinsic == Intrinsic::wasm_load64_lane) {
768       Info.opc = ISD::INTRINSIC_W_CHAIN;
769       Info.flags = MachineMemOperand::MOLoad;
770     } else {
771       Info.opc = ISD::INTRINSIC_VOID;
772       Info.flags = MachineMemOperand::MOStore;
773     }
774     Info.ptrVal = I.getArgOperand(0);
775     Info.memVT = MemVT;
776     Info.offset = 0;
777     Info.align = MemAlign;
778     return true;
779   }
780   default:
781     return false;
782   }
783 }
784 
785 //===----------------------------------------------------------------------===//
786 // WebAssembly Lowering private implementation.
787 //===----------------------------------------------------------------------===//
788 
789 //===----------------------------------------------------------------------===//
790 // Lowering Code
791 //===----------------------------------------------------------------------===//
792 
793 static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
794   MachineFunction &MF = DAG.getMachineFunction();
795   DAG.getContext()->diagnose(
796       DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
797 }
798 
799 // Test whether the given calling convention is supported.
800 static bool callingConvSupported(CallingConv::ID CallConv) {
801   // We currently support the language-independent target-independent
802   // conventions. We don't yet have a way to annotate calls with properties like
803   // "cold", and we don't have any call-clobbered registers, so these are mostly
804   // all handled the same.
805   return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
806          CallConv == CallingConv::Cold ||
807          CallConv == CallingConv::PreserveMost ||
808          CallConv == CallingConv::PreserveAll ||
809          CallConv == CallingConv::CXX_FAST_TLS ||
810          CallConv == CallingConv::WASM_EmscriptenInvoke ||
811          CallConv == CallingConv::Swift;
812 }
813 
814 SDValue
815 WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
816                                      SmallVectorImpl<SDValue> &InVals) const {
817   SelectionDAG &DAG = CLI.DAG;
818   SDLoc DL = CLI.DL;
819   SDValue Chain = CLI.Chain;
820   SDValue Callee = CLI.Callee;
821   MachineFunction &MF = DAG.getMachineFunction();
822   auto Layout = MF.getDataLayout();
823 
824   CallingConv::ID CallConv = CLI.CallConv;
825   if (!callingConvSupported(CallConv))
826     fail(DL, DAG,
827          "WebAssembly doesn't support language-specific or target-specific "
828          "calling conventions yet");
829   if (CLI.IsPatchPoint)
830     fail(DL, DAG, "WebAssembly doesn't support patch point yet");
831 
832   if (CLI.IsTailCall) {
833     auto NoTail = [&](const char *Msg) {
834       if (CLI.CB && CLI.CB->isMustTailCall())
835         fail(DL, DAG, Msg);
836       CLI.IsTailCall = false;
837     };
838 
839     if (!Subtarget->hasTailCall())
840       NoTail("WebAssembly 'tail-call' feature not enabled");
841 
842     // Varargs calls cannot be tail calls because the buffer is on the stack
843     if (CLI.IsVarArg)
844       NoTail("WebAssembly does not support varargs tail calls");
845 
846     // Do not tail call unless caller and callee return types match
847     const Function &F = MF.getFunction();
848     const TargetMachine &TM = getTargetMachine();
849     Type *RetTy = F.getReturnType();
850     SmallVector<MVT, 4> CallerRetTys;
851     SmallVector<MVT, 4> CalleeRetTys;
852     computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
853     computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
854     bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
855                       std::equal(CallerRetTys.begin(), CallerRetTys.end(),
856                                  CalleeRetTys.begin());
857     if (!TypesMatch)
858       NoTail("WebAssembly tail call requires caller and callee return types to "
859              "match");
860 
861     // If pointers to local stack values are passed, we cannot tail call
862     if (CLI.CB) {
863       for (auto &Arg : CLI.CB->args()) {
864         Value *Val = Arg.get();
865         // Trace the value back through pointer operations
866         while (true) {
867           Value *Src = Val->stripPointerCastsAndAliases();
868           if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
869             Src = GEP->getPointerOperand();
870           if (Val == Src)
871             break;
872           Val = Src;
873         }
874         if (isa<AllocaInst>(Val)) {
875           NoTail(
876               "WebAssembly does not support tail calling with stack arguments");
877           break;
878         }
879       }
880     }
881   }
882 
883   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
884   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
885   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
886 
887   // The generic code may have added an sret argument. If we're lowering an
888   // invoke function, the ABI requires that the function pointer be the first
889   // argument, so we may have to swap the arguments.
890   if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
891       Outs[0].Flags.isSRet()) {
892     std::swap(Outs[0], Outs[1]);
893     std::swap(OutVals[0], OutVals[1]);
894   }
895 
896   bool HasSwiftSelfArg = false;
897   bool HasSwiftErrorArg = false;
898   unsigned NumFixedArgs = 0;
899   for (unsigned I = 0; I < Outs.size(); ++I) {
900     const ISD::OutputArg &Out = Outs[I];
901     SDValue &OutVal = OutVals[I];
902     HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
903     HasSwiftErrorArg |= Out.Flags.isSwiftError();
904     if (Out.Flags.isNest())
905       fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
906     if (Out.Flags.isInAlloca())
907       fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
908     if (Out.Flags.isInConsecutiveRegs())
909       fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
910     if (Out.Flags.isInConsecutiveRegsLast())
911       fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
912     if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
913       auto &MFI = MF.getFrameInfo();
914       int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
915                                      Out.Flags.getNonZeroByValAlign(),
916                                      /*isSS=*/false);
917       SDValue SizeNode =
918           DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
919       SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
920       Chain = DAG.getMemcpy(
921           Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getNonZeroByValAlign(),
922           /*isVolatile*/ false, /*AlwaysInline=*/false,
923           /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
924       OutVal = FINode;
925     }
926     // Count the number of fixed args *after* legalization.
927     NumFixedArgs += Out.IsFixed;
928   }
929 
930   bool IsVarArg = CLI.IsVarArg;
931   auto PtrVT = getPointerTy(Layout);
932 
933   // For swiftcc, emit additional swiftself and swifterror arguments
934   // if there aren't. These additional arguments are also added for callee
935   // signature They are necessary to match callee and caller signature for
936   // indirect call.
937   if (CallConv == CallingConv::Swift) {
938     if (!HasSwiftSelfArg) {
939       NumFixedArgs++;
940       ISD::OutputArg Arg;
941       Arg.Flags.setSwiftSelf();
942       CLI.Outs.push_back(Arg);
943       SDValue ArgVal = DAG.getUNDEF(PtrVT);
944       CLI.OutVals.push_back(ArgVal);
945     }
946     if (!HasSwiftErrorArg) {
947       NumFixedArgs++;
948       ISD::OutputArg Arg;
949       Arg.Flags.setSwiftError();
950       CLI.Outs.push_back(Arg);
951       SDValue ArgVal = DAG.getUNDEF(PtrVT);
952       CLI.OutVals.push_back(ArgVal);
953     }
954   }
955 
956   // Analyze operands of the call, assigning locations to each operand.
957   SmallVector<CCValAssign, 16> ArgLocs;
958   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
959 
960   if (IsVarArg) {
961     // Outgoing non-fixed arguments are placed in a buffer. First
962     // compute their offsets and the total amount of buffer space needed.
963     for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
964       const ISD::OutputArg &Out = Outs[I];
965       SDValue &Arg = OutVals[I];
966       EVT VT = Arg.getValueType();
967       assert(VT != MVT::iPTR && "Legalized args should be concrete");
968       Type *Ty = VT.getTypeForEVT(*DAG.getContext());
969       Align Alignment =
970           std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
971       unsigned Offset =
972           CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
973       CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
974                                         Offset, VT.getSimpleVT(),
975                                         CCValAssign::Full));
976     }
977   }
978 
979   unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
980 
981   SDValue FINode;
982   if (IsVarArg && NumBytes) {
983     // For non-fixed arguments, next emit stores to store the argument values
984     // to the stack buffer at the offsets computed above.
985     int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
986                                                  Layout.getStackAlignment(),
987                                                  /*isSS=*/false);
988     unsigned ValNo = 0;
989     SmallVector<SDValue, 8> Chains;
990     for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
991       assert(ArgLocs[ValNo].getValNo() == ValNo &&
992              "ArgLocs should remain in order and only hold varargs args");
993       unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
994       FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
995       SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
996                                 DAG.getConstant(Offset, DL, PtrVT));
997       Chains.push_back(
998           DAG.getStore(Chain, DL, Arg, Add,
999                        MachinePointerInfo::getFixedStack(MF, FI, Offset)));
1000     }
1001     if (!Chains.empty())
1002       Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
1003   } else if (IsVarArg) {
1004     FINode = DAG.getIntPtrConstant(0, DL);
1005   }
1006 
1007   if (Callee->getOpcode() == ISD::GlobalAddress) {
1008     // If the callee is a GlobalAddress node (quite common, every direct call
1009     // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
1010     // doesn't at MO_GOT which is not needed for direct calls.
1011     GlobalAddressSDNode* GA = cast<GlobalAddressSDNode>(Callee);
1012     Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
1013                                         getPointerTy(DAG.getDataLayout()),
1014                                         GA->getOffset());
1015     Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
1016                          getPointerTy(DAG.getDataLayout()), Callee);
1017   }
1018 
1019   // Compute the operands for the CALLn node.
1020   SmallVector<SDValue, 16> Ops;
1021   Ops.push_back(Chain);
1022   Ops.push_back(Callee);
1023 
1024   // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
1025   // isn't reliable.
1026   Ops.append(OutVals.begin(),
1027              IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
1028   // Add a pointer to the vararg buffer.
1029   if (IsVarArg)
1030     Ops.push_back(FINode);
1031 
1032   SmallVector<EVT, 8> InTys;
1033   for (const auto &In : Ins) {
1034     assert(!In.Flags.isByVal() && "byval is not valid for return values");
1035     assert(!In.Flags.isNest() && "nest is not valid for return values");
1036     if (In.Flags.isInAlloca())
1037       fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
1038     if (In.Flags.isInConsecutiveRegs())
1039       fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
1040     if (In.Flags.isInConsecutiveRegsLast())
1041       fail(DL, DAG,
1042            "WebAssembly hasn't implemented cons regs last return values");
1043     // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
1044     // registers.
1045     InTys.push_back(In.VT);
1046   }
1047 
1048   if (CLI.IsTailCall) {
1049     // ret_calls do not return values to the current frame
1050     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1051     return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
1052   }
1053 
1054   InTys.push_back(MVT::Other);
1055   SDVTList InTyList = DAG.getVTList(InTys);
1056   SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);
1057 
1058   for (size_t I = 0; I < Ins.size(); ++I)
1059     InVals.push_back(Res.getValue(I));
1060 
1061   // Return the chain
1062   return Res.getValue(Ins.size());
1063 }
1064 
1065 bool WebAssemblyTargetLowering::CanLowerReturn(
1066     CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
1067     const SmallVectorImpl<ISD::OutputArg> &Outs,
1068     LLVMContext & /*Context*/) const {
1069   // WebAssembly can only handle returning tuples with multivalue enabled
1070   return Subtarget->hasMultivalue() || Outs.size() <= 1;
1071 }
1072 
1073 SDValue WebAssemblyTargetLowering::LowerReturn(
1074     SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
1075     const SmallVectorImpl<ISD::OutputArg> &Outs,
1076     const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
1077     SelectionDAG &DAG) const {
1078   assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
1079          "MVP WebAssembly can only return up to one value");
1080   if (!callingConvSupported(CallConv))
1081     fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
1082 
1083   SmallVector<SDValue, 4> RetOps(1, Chain);
1084   RetOps.append(OutVals.begin(), OutVals.end());
1085   Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);
1086 
1087   // Record the number and types of the return values.
1088   for (const ISD::OutputArg &Out : Outs) {
1089     assert(!Out.Flags.isByVal() && "byval is not valid for return values");
1090     assert(!Out.Flags.isNest() && "nest is not valid for return values");
1091     assert(Out.IsFixed && "non-fixed return value is not valid");
1092     if (Out.Flags.isInAlloca())
1093       fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
1094     if (Out.Flags.isInConsecutiveRegs())
1095       fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
1096     if (Out.Flags.isInConsecutiveRegsLast())
1097       fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
1098   }
1099 
1100   return Chain;
1101 }
1102 
1103 SDValue WebAssemblyTargetLowering::LowerFormalArguments(
1104     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1105     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1106     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1107   if (!callingConvSupported(CallConv))
1108     fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
1109 
1110   MachineFunction &MF = DAG.getMachineFunction();
1111   auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
1112 
1113   // Set up the incoming ARGUMENTS value, which serves to represent the liveness
1114   // of the incoming values before they're represented by virtual registers.
1115   MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);
1116 
1117   bool HasSwiftErrorArg = false;
1118   bool HasSwiftSelfArg = false;
1119   for (const ISD::InputArg &In : Ins) {
1120     HasSwiftSelfArg |= In.Flags.isSwiftSelf();
1121     HasSwiftErrorArg |= In.Flags.isSwiftError();
1122     if (In.Flags.isInAlloca())
1123       fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
1124     if (In.Flags.isNest())
1125       fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
1126     if (In.Flags.isInConsecutiveRegs())
1127       fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
1128     if (In.Flags.isInConsecutiveRegsLast())
1129       fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
1130     // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
1131     // registers.
1132     InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
1133                                            DAG.getTargetConstant(InVals.size(),
1134                                                                  DL, MVT::i32))
1135                              : DAG.getUNDEF(In.VT));
1136 
1137     // Record the number and types of arguments.
1138     MFI->addParam(In.VT);
1139   }
1140 
1141   // For swiftcc, emit additional swiftself and swifterror arguments
1142   // if there aren't. These additional arguments are also added for callee
1143   // signature They are necessary to match callee and caller signature for
1144   // indirect call.
1145   auto PtrVT = getPointerTy(MF.getDataLayout());
1146   if (CallConv == CallingConv::Swift) {
1147     if (!HasSwiftSelfArg) {
1148       MFI->addParam(PtrVT);
1149     }
1150     if (!HasSwiftErrorArg) {
1151       MFI->addParam(PtrVT);
1152     }
1153   }
1154   // Varargs are copied into a buffer allocated by the caller, and a pointer to
1155   // the buffer is passed as an argument.
1156   if (IsVarArg) {
1157     MVT PtrVT = getPointerTy(MF.getDataLayout());
1158     Register VarargVreg =
1159         MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
1160     MFI->setVarargBufferVreg(VarargVreg);
1161     Chain = DAG.getCopyToReg(
1162         Chain, DL, VarargVreg,
1163         DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
1164                     DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
1165     MFI->addParam(PtrVT);
1166   }
1167 
1168   // Record the number and types of arguments and results.
1169   SmallVector<MVT, 4> Params;
1170   SmallVector<MVT, 4> Results;
1171   computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(),
1172                       MF.getFunction(), DAG.getTarget(), Params, Results);
1173   for (MVT VT : Results)
1174     MFI->addResult(VT);
1175   // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
1176   // the param logic here with ComputeSignatureVTs
1177   assert(MFI->getParams().size() == Params.size() &&
1178          std::equal(MFI->getParams().begin(), MFI->getParams().end(),
1179                     Params.begin()));
1180 
1181   return Chain;
1182 }
1183 
1184 void WebAssemblyTargetLowering::ReplaceNodeResults(
1185     SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
1186   switch (N->getOpcode()) {
1187   case ISD::SIGN_EXTEND_INREG:
1188     // Do not add any results, signifying that N should not be custom lowered
1189     // after all. This happens because simd128 turns on custom lowering for
1190     // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
1191     // illegal type.
1192     break;
1193   default:
1194     llvm_unreachable(
1195         "ReplaceNodeResults not implemented for this op for WebAssembly!");
1196   }
1197 }
1198 
1199 //===----------------------------------------------------------------------===//
1200 //  Custom lowering hooks.
1201 //===----------------------------------------------------------------------===//
1202 
1203 SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
1204                                                   SelectionDAG &DAG) const {
1205   SDLoc DL(Op);
1206   switch (Op.getOpcode()) {
1207   default:
1208     llvm_unreachable("unimplemented operation lowering");
1209     return SDValue();
1210   case ISD::FrameIndex:
1211     return LowerFrameIndex(Op, DAG);
1212   case ISD::GlobalAddress:
1213     return LowerGlobalAddress(Op, DAG);
1214   case ISD::GlobalTLSAddress:
1215     return LowerGlobalTLSAddress(Op, DAG);
1216   case ISD::ExternalSymbol:
1217     return LowerExternalSymbol(Op, DAG);
1218   case ISD::JumpTable:
1219     return LowerJumpTable(Op, DAG);
1220   case ISD::BR_JT:
1221     return LowerBR_JT(Op, DAG);
1222   case ISD::VASTART:
1223     return LowerVASTART(Op, DAG);
1224   case ISD::BlockAddress:
1225   case ISD::BRIND:
1226     fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
1227     return SDValue();
1228   case ISD::RETURNADDR:
1229     return LowerRETURNADDR(Op, DAG);
1230   case ISD::FRAMEADDR:
1231     return LowerFRAMEADDR(Op, DAG);
1232   case ISD::CopyToReg:
1233     return LowerCopyToReg(Op, DAG);
1234   case ISD::EXTRACT_VECTOR_ELT:
1235   case ISD::INSERT_VECTOR_ELT:
1236     return LowerAccessVectorElement(Op, DAG);
1237   case ISD::INTRINSIC_VOID:
1238   case ISD::INTRINSIC_WO_CHAIN:
1239   case ISD::INTRINSIC_W_CHAIN:
1240     return LowerIntrinsic(Op, DAG);
1241   case ISD::SIGN_EXTEND_INREG:
1242     return LowerSIGN_EXTEND_INREG(Op, DAG);
1243   case ISD::BUILD_VECTOR:
1244     return LowerBUILD_VECTOR(Op, DAG);
1245   case ISD::VECTOR_SHUFFLE:
1246     return LowerVECTOR_SHUFFLE(Op, DAG);
1247   case ISD::SETCC:
1248     return LowerSETCC(Op, DAG);
1249   case ISD::SHL:
1250   case ISD::SRA:
1251   case ISD::SRL:
1252     return LowerShift(Op, DAG);
1253   case ISD::FP_TO_SINT_SAT:
1254   case ISD::FP_TO_UINT_SAT:
1255     return LowerFP_TO_INT_SAT(Op, DAG);
1256   }
1257 }
1258 
1259 SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
1260                                                   SelectionDAG &DAG) const {
1261   SDValue Src = Op.getOperand(2);
1262   if (isa<FrameIndexSDNode>(Src.getNode())) {
1263     // CopyToReg nodes don't support FrameIndex operands. Other targets select
1264     // the FI to some LEA-like instruction, but since we don't have that, we
1265     // need to insert some kind of instruction that can take an FI operand and
1266     // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
1267     // local.copy between Op and its FI operand.
1268     SDValue Chain = Op.getOperand(0);
1269     SDLoc DL(Op);
1270     unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
1271     EVT VT = Src.getValueType();
1272     SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
1273                                                    : WebAssembly::COPY_I64,
1274                                     DL, VT, Src),
1275                  0);
1276     return Op.getNode()->getNumValues() == 1
1277                ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
1278                : DAG.getCopyToReg(Chain, DL, Reg, Copy,
1279                                   Op.getNumOperands() == 4 ? Op.getOperand(3)
1280                                                            : SDValue());
1281   }
1282   return SDValue();
1283 }
1284 
1285 SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
1286                                                    SelectionDAG &DAG) const {
1287   int FI = cast<FrameIndexSDNode>(Op)->getIndex();
1288   return DAG.getTargetFrameIndex(FI, Op.getValueType());
1289 }
1290 
1291 SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
1292                                                    SelectionDAG &DAG) const {
1293   SDLoc DL(Op);
1294 
1295   if (!Subtarget->getTargetTriple().isOSEmscripten()) {
1296     fail(DL, DAG,
1297          "Non-Emscripten WebAssembly hasn't implemented "
1298          "__builtin_return_address");
1299     return SDValue();
1300   }
1301 
1302   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1303     return SDValue();
1304 
1305   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1306   MakeLibCallOptions CallOptions;
1307   return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
1308                      {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
1309       .first;
1310 }
1311 
1312 SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
1313                                                   SelectionDAG &DAG) const {
1314   // Non-zero depths are not supported by WebAssembly currently. Use the
1315   // legalizer's default expansion, which is to return 0 (what this function is
1316   // documented to do).
1317   if (Op.getConstantOperandVal(0) > 0)
1318     return SDValue();
1319 
1320   DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
1321   EVT VT = Op.getValueType();
1322   Register FP =
1323       Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
1324   return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
1325 }
1326 
1327 SDValue
1328 WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1329                                                  SelectionDAG &DAG) const {
1330   SDLoc DL(Op);
1331   const auto *GA = cast<GlobalAddressSDNode>(Op);
1332   MVT PtrVT = getPointerTy(DAG.getDataLayout());
1333 
1334   MachineFunction &MF = DAG.getMachineFunction();
1335   if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
1336     report_fatal_error("cannot use thread-local storage without bulk memory",
1337                        false);
1338 
1339   const GlobalValue *GV = GA->getGlobal();
1340 
1341   // Currently Emscripten does not support dynamic linking with threads.
1342   // Therefore, if we have thread-local storage, only the local-exec model
1343   // is possible.
1344   // TODO: remove this and implement proper TLS models once Emscripten
1345   // supports dynamic linking with threads.
1346   if (GV->getThreadLocalMode() != GlobalValue::LocalExecTLSModel &&
1347       !Subtarget->getTargetTriple().isOSEmscripten()) {
1348     report_fatal_error("only -ftls-model=local-exec is supported for now on "
1349                        "non-Emscripten OSes: variable " +
1350                            GV->getName(),
1351                        false);
1352   }
1353 
1354   auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
1355                                      : WebAssembly::GLOBAL_GET_I32;
1356   const char *BaseName = MF.createExternalSymbolName("__tls_base");
1357 
1358   SDValue BaseAddr(
1359       DAG.getMachineNode(GlobalGet, DL, PtrVT,
1360                          DAG.getTargetExternalSymbol(BaseName, PtrVT)),
1361       0);
1362 
1363   SDValue TLSOffset = DAG.getTargetGlobalAddress(
1364       GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
1365   SDValue SymAddr = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, TLSOffset);
1366 
1367   return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
1368 }
1369 
1370 SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
1371                                                       SelectionDAG &DAG) const {
1372   SDLoc DL(Op);
1373   const auto *GA = cast<GlobalAddressSDNode>(Op);
1374   EVT VT = Op.getValueType();
1375   assert(GA->getTargetFlags() == 0 &&
1376          "Unexpected target flags on generic GlobalAddressSDNode");
1377   if (GA->getAddressSpace() != 0)
1378     fail(DL, DAG, "WebAssembly only expects the 0 address space");
1379 
1380   unsigned OperandFlags = 0;
1381   if (isPositionIndependent()) {
1382     const GlobalValue *GV = GA->getGlobal();
1383     if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
1384       MachineFunction &MF = DAG.getMachineFunction();
1385       MVT PtrVT = getPointerTy(MF.getDataLayout());
1386       const char *BaseName;
1387       if (GV->getValueType()->isFunctionTy()) {
1388         BaseName = MF.createExternalSymbolName("__table_base");
1389         OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
1390       }
1391       else {
1392         BaseName = MF.createExternalSymbolName("__memory_base");
1393         OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
1394       }
1395       SDValue BaseAddr =
1396           DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1397                       DAG.getTargetExternalSymbol(BaseName, PtrVT));
1398 
1399       SDValue SymAddr = DAG.getNode(
1400           WebAssemblyISD::WrapperPIC, DL, VT,
1401           DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
1402                                      OperandFlags));
1403 
1404       return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
1405     } else {
1406       OperandFlags = WebAssemblyII::MO_GOT;
1407     }
1408   }
1409 
1410   return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1411                      DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1412                                                 GA->getOffset(), OperandFlags));
1413 }
1414 
1415 SDValue
1416 WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1417                                                SelectionDAG &DAG) const {
1418   SDLoc DL(Op);
1419   const auto *ES = cast<ExternalSymbolSDNode>(Op);
1420   EVT VT = Op.getValueType();
1421   assert(ES->getTargetFlags() == 0 &&
1422          "Unexpected target flags on generic ExternalSymbolSDNode");
1423   return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1424                      DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1425 }
1426 
1427 SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1428                                                   SelectionDAG &DAG) const {
1429   // There's no need for a Wrapper node because we always incorporate a jump
1430   // table operand into a BR_TABLE instruction, rather than ever
1431   // materializing it in a register.
1432   const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1433   return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1434                                 JT->getTargetFlags());
1435 }
1436 
1437 SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1438                                               SelectionDAG &DAG) const {
1439   SDLoc DL(Op);
1440   SDValue Chain = Op.getOperand(0);
1441   const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1442   SDValue Index = Op.getOperand(2);
1443   assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
1444 
1445   SmallVector<SDValue, 8> Ops;
1446   Ops.push_back(Chain);
1447   Ops.push_back(Index);
1448 
1449   MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
1450   const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1451 
1452   // Add an operand for each case.
1453   for (auto MBB : MBBs)
1454     Ops.push_back(DAG.getBasicBlock(MBB));
1455 
1456   // Add the first MBB as a dummy default target for now. This will be replaced
1457   // with the proper default target (and the preceding range check eliminated)
1458   // if possible by WebAssemblyFixBrTableDefaults.
1459   Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
1460   return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1461 }
1462 
1463 SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1464                                                 SelectionDAG &DAG) const {
1465   SDLoc DL(Op);
1466   EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
1467 
1468   auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1469   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1470 
1471   SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1472                                     MFI->getVarargBufferVreg(), PtrVT);
1473   return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1474                       MachinePointerInfo(SV));
1475 }
1476 
1477 static SDValue getCppExceptionSymNode(SDValue Op, unsigned TagIndex,
1478                                       SelectionDAG &DAG) {
1479   // We only support C++ exceptions for now
1480   int Tag =
1481       cast<ConstantSDNode>(Op.getOperand(TagIndex).getNode())->getZExtValue();
1482   if (Tag != WebAssembly::CPP_EXCEPTION)
1483     llvm_unreachable("Invalid tag: We only support C++ exceptions for now");
1484   auto &MF = DAG.getMachineFunction();
1485   const auto &TLI = DAG.getTargetLoweringInfo();
1486   MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1487   const char *SymName = MF.createExternalSymbolName("__cpp_exception");
1488   return DAG.getNode(WebAssemblyISD::Wrapper, SDLoc(Op), PtrVT,
1489                      DAG.getTargetExternalSymbol(SymName, PtrVT));
1490 }
1491 
1492 SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1493                                                   SelectionDAG &DAG) const {
1494   MachineFunction &MF = DAG.getMachineFunction();
1495   unsigned IntNo;
1496   switch (Op.getOpcode()) {
1497   case ISD::INTRINSIC_VOID:
1498   case ISD::INTRINSIC_W_CHAIN:
1499     IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1500     break;
1501   case ISD::INTRINSIC_WO_CHAIN:
1502     IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1503     break;
1504   default:
1505     llvm_unreachable("Invalid intrinsic");
1506   }
1507   SDLoc DL(Op);
1508 
1509   switch (IntNo) {
1510   default:
1511     return SDValue(); // Don't custom lower most intrinsics.
1512 
1513   case Intrinsic::wasm_lsda: {
1514     EVT VT = Op.getValueType();
1515     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1516     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1517     auto &Context = MF.getMMI().getContext();
1518     MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
1519                                             Twine(MF.getFunctionNumber()));
1520     return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1521                        DAG.getMCSymbol(S, PtrVT));
1522   }
1523 
1524   case Intrinsic::wasm_throw: {
1525     SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
1526     return DAG.getNode(WebAssemblyISD::THROW, DL,
1527                        MVT::Other, // outchain type
1528                        {
1529                            Op.getOperand(0), // inchain
1530                            SymNode,          // exception symbol
1531                            Op.getOperand(3)  // thrown value
1532                        });
1533   }
1534 
1535   case Intrinsic::wasm_catch: {
1536     SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
1537     return DAG.getNode(WebAssemblyISD::CATCH, DL,
1538                        {
1539                            MVT::i32,  // outchain type
1540                            MVT::Other // return value
1541                        },
1542                        {
1543                            Op.getOperand(0), // inchain
1544                            SymNode           // exception symbol
1545                        });
1546   }
1547 
1548   case Intrinsic::wasm_shuffle: {
1549     // Drop in-chain and replace undefs, but otherwise pass through unchanged
1550     SDValue Ops[18];
1551     size_t OpIdx = 0;
1552     Ops[OpIdx++] = Op.getOperand(1);
1553     Ops[OpIdx++] = Op.getOperand(2);
1554     while (OpIdx < 18) {
1555       const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
1556       if (MaskIdx.isUndef() ||
1557           cast<ConstantSDNode>(MaskIdx.getNode())->getZExtValue() >= 32) {
1558         Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32);
1559       } else {
1560         Ops[OpIdx++] = MaskIdx;
1561       }
1562     }
1563     return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1564   }
1565   }
1566 }
1567 
1568 SDValue
1569 WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1570                                                   SelectionDAG &DAG) const {
1571   SDLoc DL(Op);
1572   // If sign extension operations are disabled, allow sext_inreg only if operand
1573   // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
1574   // extension operations, but allowing sext_inreg in this context lets us have
1575   // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
1576   // everywhere would be simpler in this file, but would necessitate large and
1577   // brittle patterns to undo the expansion and select extract_lane_s
1578   // instructions.
1579   assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
1580   if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1581     return SDValue();
1582 
1583   const SDValue &Extract = Op.getOperand(0);
1584   MVT VecT = Extract.getOperand(0).getSimpleValueType();
1585   if (VecT.getVectorElementType().getSizeInBits() > 32)
1586     return SDValue();
1587   MVT ExtractedLaneT =
1588       cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
1589   MVT ExtractedVecT =
1590       MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
1591   if (ExtractedVecT == VecT)
1592     return Op;
1593 
1594   // Bitcast vector to appropriate type to ensure ISel pattern coverage
1595   const SDNode *Index = Extract.getOperand(1).getNode();
1596   if (!isa<ConstantSDNode>(Index))
1597     return SDValue();
1598   unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
1599   unsigned Scale =
1600       ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
1601   assert(Scale > 1);
1602   SDValue NewIndex =
1603       DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
1604   SDValue NewExtract = DAG.getNode(
1605       ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
1606       DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
1607   return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
1608                      Op.getOperand(1));
1609 }
1610 
1611 SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
1612                                                      SelectionDAG &DAG) const {
1613   SDLoc DL(Op);
1614   const EVT VecT = Op.getValueType();
1615   const EVT LaneT = Op.getOperand(0).getValueType();
1616   const size_t Lanes = Op.getNumOperands();
1617   bool CanSwizzle = VecT == MVT::v16i8;
1618 
1619   // BUILD_VECTORs are lowered to the instruction that initializes the highest
1620   // possible number of lanes at once followed by a sequence of replace_lane
1621   // instructions to individually initialize any remaining lanes.
1622 
1623   // TODO: Tune this. For example, lanewise swizzling is very expensive, so
1624   // swizzled lanes should be given greater weight.
1625 
1626   // TODO: Investigate looping rather than always extracting/replacing specific
1627   // lanes to fill gaps.
1628 
1629   auto IsConstant = [](const SDValue &V) {
1630     return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
1631   };
1632 
1633   // Returns the source vector and index vector pair if they exist. Checks for:
1634   //   (extract_vector_elt
1635   //     $src,
1636   //     (sign_extend_inreg (extract_vector_elt $indices, $i))
1637   //   )
1638   auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
1639     auto Bail = std::make_pair(SDValue(), SDValue());
1640     if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1641       return Bail;
1642     const SDValue &SwizzleSrc = Lane->getOperand(0);
1643     const SDValue &IndexExt = Lane->getOperand(1);
1644     if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
1645       return Bail;
1646     const SDValue &Index = IndexExt->getOperand(0);
1647     if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1648       return Bail;
1649     const SDValue &SwizzleIndices = Index->getOperand(0);
1650     if (SwizzleSrc.getValueType() != MVT::v16i8 ||
1651         SwizzleIndices.getValueType() != MVT::v16i8 ||
1652         Index->getOperand(1)->getOpcode() != ISD::Constant ||
1653         Index->getConstantOperandVal(1) != I)
1654       return Bail;
1655     return std::make_pair(SwizzleSrc, SwizzleIndices);
1656   };
1657 
1658   // If the lane is extracted from another vector at a constant index, return
1659   // that vector. The source vector must not have more lanes than the dest
1660   // because the shufflevector indices are in terms of the destination lanes and
1661   // would not be able to address the smaller individual source lanes.
1662   auto GetShuffleSrc = [&](const SDValue &Lane) {
1663     if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1664       return SDValue();
1665     if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
1666       return SDValue();
1667     if (Lane->getOperand(0).getValueType().getVectorNumElements() >
1668         VecT.getVectorNumElements())
1669       return SDValue();
1670     return Lane->getOperand(0);
1671   };
1672 
1673   using ValueEntry = std::pair<SDValue, size_t>;
1674   SmallVector<ValueEntry, 16> SplatValueCounts;
1675 
1676   using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
1677   SmallVector<SwizzleEntry, 16> SwizzleCounts;
1678 
1679   using ShuffleEntry = std::pair<SDValue, size_t>;
1680   SmallVector<ShuffleEntry, 16> ShuffleCounts;
1681 
1682   auto AddCount = [](auto &Counts, const auto &Val) {
1683     auto CountIt =
1684         llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
1685     if (CountIt == Counts.end()) {
1686       Counts.emplace_back(Val, 1);
1687     } else {
1688       CountIt->second++;
1689     }
1690   };
1691 
1692   auto GetMostCommon = [](auto &Counts) {
1693     auto CommonIt =
1694         std::max_element(Counts.begin(), Counts.end(),
1695                          [](auto A, auto B) { return A.second < B.second; });
1696     assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector");
1697     return *CommonIt;
1698   };
1699 
1700   size_t NumConstantLanes = 0;
1701 
1702   // Count eligible lanes for each type of vector creation op
1703   for (size_t I = 0; I < Lanes; ++I) {
1704     const SDValue &Lane = Op->getOperand(I);
1705     if (Lane.isUndef())
1706       continue;
1707 
1708     AddCount(SplatValueCounts, Lane);
1709 
1710     if (IsConstant(Lane))
1711       NumConstantLanes++;
1712     if (auto ShuffleSrc = GetShuffleSrc(Lane))
1713       AddCount(ShuffleCounts, ShuffleSrc);
1714     if (CanSwizzle) {
1715       auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
1716       if (SwizzleSrcs.first)
1717         AddCount(SwizzleCounts, SwizzleSrcs);
1718     }
1719   }
1720 
1721   SDValue SplatValue;
1722   size_t NumSplatLanes;
1723   std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
1724 
1725   SDValue SwizzleSrc;
1726   SDValue SwizzleIndices;
1727   size_t NumSwizzleLanes = 0;
1728   if (SwizzleCounts.size())
1729     std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
1730                           NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
1731 
1732   // Shuffles can draw from up to two vectors, so find the two most common
1733   // sources.
1734   SDValue ShuffleSrc1, ShuffleSrc2;
1735   size_t NumShuffleLanes = 0;
1736   if (ShuffleCounts.size()) {
1737     std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
1738     ShuffleCounts.erase(std::remove_if(ShuffleCounts.begin(),
1739                                        ShuffleCounts.end(),
1740                                        [&](const auto &Pair) {
1741                                          return Pair.first == ShuffleSrc1;
1742                                        }),
1743                         ShuffleCounts.end());
1744   }
1745   if (ShuffleCounts.size()) {
1746     size_t AdditionalShuffleLanes;
1747     std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
1748         GetMostCommon(ShuffleCounts);
1749     NumShuffleLanes += AdditionalShuffleLanes;
1750   }
1751 
1752   // Predicate returning true if the lane is properly initialized by the
1753   // original instruction
1754   std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
1755   SDValue Result;
1756   // Prefer swizzles over shuffles over vector consts over splats
1757   if (NumSwizzleLanes >= NumShuffleLanes &&
1758       NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
1759     Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
1760                          SwizzleIndices);
1761     auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
1762     IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
1763       return Swizzled == GetSwizzleSrcs(I, Lane);
1764     };
1765   } else if (NumShuffleLanes >= NumConstantLanes &&
1766              NumShuffleLanes >= NumSplatLanes) {
1767     size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8;
1768     size_t DestLaneCount = VecT.getVectorNumElements();
1769     size_t Scale1 = 1;
1770     size_t Scale2 = 1;
1771     SDValue Src1 = ShuffleSrc1;
1772     SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT);
1773     if (Src1.getValueType() != VecT) {
1774       size_t LaneSize =
1775           Src1.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
1776       assert(LaneSize > DestLaneSize);
1777       Scale1 = LaneSize / DestLaneSize;
1778       Src1 = DAG.getBitcast(VecT, Src1);
1779     }
1780     if (Src2.getValueType() != VecT) {
1781       size_t LaneSize =
1782           Src2.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
1783       assert(LaneSize > DestLaneSize);
1784       Scale2 = LaneSize / DestLaneSize;
1785       Src2 = DAG.getBitcast(VecT, Src2);
1786     }
1787 
1788     int Mask[16];
1789     assert(DestLaneCount <= 16);
1790     for (size_t I = 0; I < DestLaneCount; ++I) {
1791       const SDValue &Lane = Op->getOperand(I);
1792       SDValue Src = GetShuffleSrc(Lane);
1793       if (Src == ShuffleSrc1) {
1794         Mask[I] = Lane->getConstantOperandVal(1) * Scale1;
1795       } else if (Src && Src == ShuffleSrc2) {
1796         Mask[I] = DestLaneCount + Lane->getConstantOperandVal(1) * Scale2;
1797       } else {
1798         Mask[I] = -1;
1799       }
1800     }
1801     ArrayRef<int> MaskRef(Mask, DestLaneCount);
1802     Result = DAG.getVectorShuffle(VecT, DL, Src1, Src2, MaskRef);
1803     IsLaneConstructed = [&](size_t, const SDValue &Lane) {
1804       auto Src = GetShuffleSrc(Lane);
1805       return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
1806     };
1807   } else if (NumConstantLanes >= NumSplatLanes) {
1808     SmallVector<SDValue, 16> ConstLanes;
1809     for (const SDValue &Lane : Op->op_values()) {
1810       if (IsConstant(Lane)) {
1811         ConstLanes.push_back(Lane);
1812       } else if (LaneT.isFloatingPoint()) {
1813         ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
1814       } else {
1815         ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
1816       }
1817     }
1818     Result = DAG.getBuildVector(VecT, DL, ConstLanes);
1819     IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
1820       return IsConstant(Lane);
1821     };
1822   } else {
1823     // Use a splat, but possibly a load_splat
1824     LoadSDNode *SplattedLoad;
1825     if ((SplattedLoad = dyn_cast<LoadSDNode>(SplatValue)) &&
1826         SplattedLoad->getMemoryVT() == VecT.getVectorElementType()) {
1827       Result = DAG.getMemIntrinsicNode(
1828           WebAssemblyISD::LOAD_SPLAT, DL, DAG.getVTList(VecT),
1829           {SplattedLoad->getChain(), SplattedLoad->getBasePtr(),
1830            SplattedLoad->getOffset()},
1831           SplattedLoad->getMemoryVT(), SplattedLoad->getMemOperand());
1832     } else {
1833       Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
1834     }
1835     IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
1836       return Lane == SplatValue;
1837     };
1838   }
1839 
1840   assert(Result);
1841   assert(IsLaneConstructed);
1842 
1843   // Add replace_lane instructions for any unhandled values
1844   for (size_t I = 0; I < Lanes; ++I) {
1845     const SDValue &Lane = Op->getOperand(I);
1846     if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
1847       Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
1848                            DAG.getConstant(I, DL, MVT::i32));
1849   }
1850 
1851   return Result;
1852 }
1853 
1854 SDValue
1855 WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
1856                                                SelectionDAG &DAG) const {
1857   SDLoc DL(Op);
1858   ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
1859   MVT VecType = Op.getOperand(0).getSimpleValueType();
1860   assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
1861   size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
1862 
1863   // Space for two vector args and sixteen mask indices
1864   SDValue Ops[18];
1865   size_t OpIdx = 0;
1866   Ops[OpIdx++] = Op.getOperand(0);
1867   Ops[OpIdx++] = Op.getOperand(1);
1868 
1869   // Expand mask indices to byte indices and materialize them as operands
1870   for (int M : Mask) {
1871     for (size_t J = 0; J < LaneBytes; ++J) {
1872       // Lower undefs (represented by -1 in mask) to zero
1873       uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
1874       Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
1875     }
1876   }
1877 
1878   return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1879 }
1880 
1881 SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
1882                                               SelectionDAG &DAG) const {
1883   SDLoc DL(Op);
1884   // The legalizer does not know how to expand the unsupported comparison modes
1885   // of i64x2 vectors, so we manually unroll them here.
1886   assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64);
1887   SmallVector<SDValue, 2> LHS, RHS;
1888   DAG.ExtractVectorElements(Op->getOperand(0), LHS);
1889   DAG.ExtractVectorElements(Op->getOperand(1), RHS);
1890   const SDValue &CC = Op->getOperand(2);
1891   auto MakeLane = [&](unsigned I) {
1892     return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
1893                        DAG.getConstant(uint64_t(-1), DL, MVT::i64),
1894                        DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
1895   };
1896   return DAG.getBuildVector(Op->getValueType(0), DL,
1897                             {MakeLane(0), MakeLane(1)});
1898 }
1899 
1900 SDValue
1901 WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
1902                                                     SelectionDAG &DAG) const {
1903   // Allow constant lane indices, expand variable lane indices
1904   SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
1905   if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
1906     return Op;
1907   else
1908     // Perform default expansion
1909     return SDValue();
1910 }
1911 
1912 static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
1913   EVT LaneT = Op.getSimpleValueType().getVectorElementType();
1914   // 32-bit and 64-bit unrolled shifts will have proper semantics
1915   if (LaneT.bitsGE(MVT::i32))
1916     return DAG.UnrollVectorOp(Op.getNode());
1917   // Otherwise mask the shift value to get proper semantics from 32-bit shift
1918   SDLoc DL(Op);
1919   size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
1920   SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
1921   unsigned ShiftOpcode = Op.getOpcode();
1922   SmallVector<SDValue, 16> ShiftedElements;
1923   DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
1924   SmallVector<SDValue, 16> ShiftElements;
1925   DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
1926   SmallVector<SDValue, 16> UnrolledOps;
1927   for (size_t i = 0; i < NumLanes; ++i) {
1928     SDValue MaskedShiftValue =
1929         DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
1930     SDValue ShiftedValue = ShiftedElements[i];
1931     if (ShiftOpcode == ISD::SRA)
1932       ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
1933                                  ShiftedValue, DAG.getValueType(LaneT));
1934     UnrolledOps.push_back(
1935         DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
1936   }
1937   return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
1938 }
1939 
1940 SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
1941                                               SelectionDAG &DAG) const {
1942   SDLoc DL(Op);
1943 
1944   // Only manually lower vector shifts
1945   assert(Op.getSimpleValueType().isVector());
1946 
1947   auto ShiftVal = DAG.getSplatValue(Op.getOperand(1));
1948   if (!ShiftVal)
1949     return unrollVectorShift(Op, DAG);
1950 
1951   // Use anyext because none of the high bits can affect the shift
1952   ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);
1953 
1954   unsigned Opcode;
1955   switch (Op.getOpcode()) {
1956   case ISD::SHL:
1957     Opcode = WebAssemblyISD::VEC_SHL;
1958     break;
1959   case ISD::SRA:
1960     Opcode = WebAssemblyISD::VEC_SHR_S;
1961     break;
1962   case ISD::SRL:
1963     Opcode = WebAssemblyISD::VEC_SHR_U;
1964     break;
1965   default:
1966     llvm_unreachable("unexpected opcode");
1967   }
1968 
1969   return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
1970 }
1971 
1972 SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
1973                                                       SelectionDAG &DAG) const {
1974   SDLoc DL(Op);
1975   EVT ResT = Op.getValueType();
1976   uint64_t Width = Op.getConstantOperandVal(1);
1977 
1978   if ((ResT == MVT::i32 || ResT == MVT::i64) && (Width == 32 || Width == 64))
1979     return Op;
1980 
1981   if (ResT == MVT::v4i32 && Width == 32)
1982     return Op;
1983 
1984   return SDValue();
1985 }
1986 
1987 //===----------------------------------------------------------------------===//
1988 //   Custom DAG combine hooks
1989 //===----------------------------------------------------------------------===//
1990 static SDValue
1991 performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
1992   auto &DAG = DCI.DAG;
1993   auto Shuffle = cast<ShuffleVectorSDNode>(N);
1994 
1995   // Hoist vector bitcasts that don't change the number of lanes out of unary
1996   // shuffles, where they are less likely to get in the way of other combines.
1997   // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
1998   //  (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
1999   SDValue Bitcast = N->getOperand(0);
2000   if (Bitcast.getOpcode() != ISD::BITCAST)
2001     return SDValue();
2002   if (!N->getOperand(1).isUndef())
2003     return SDValue();
2004   SDValue CastOp = Bitcast.getOperand(0);
2005   MVT SrcType = CastOp.getSimpleValueType();
2006   MVT DstType = Bitcast.getSimpleValueType();
2007   if (!SrcType.is128BitVector() ||
2008       SrcType.getVectorNumElements() != DstType.getVectorNumElements())
2009     return SDValue();
2010   SDValue NewShuffle = DAG.getVectorShuffle(
2011       SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
2012   return DAG.getBitcast(DstType, NewShuffle);
2013 }
2014 
2015 static SDValue
2016 performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2017   auto &DAG = DCI.DAG;
2018   assert(N->getOpcode() == ISD::SIGN_EXTEND ||
2019          N->getOpcode() == ISD::ZERO_EXTEND);
2020 
2021   // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
2022   // possible before the extract_subvector can be expanded.
2023   auto Extract = N->getOperand(0);
2024   if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2025     return SDValue();
2026   auto Source = Extract.getOperand(0);
2027   auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
2028   if (IndexNode == nullptr)
2029     return SDValue();
2030   auto Index = IndexNode->getZExtValue();
2031 
2032   // Only v8i8, v4i16, and v2i32 extracts can be widened, and only if the
2033   // extracted subvector is the low or high half of its source.
2034   EVT ResVT = N->getValueType(0);
2035   if (ResVT == MVT::v8i16) {
2036     if (Extract.getValueType() != MVT::v8i8 ||
2037         Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
2038       return SDValue();
2039   } else if (ResVT == MVT::v4i32) {
2040     if (Extract.getValueType() != MVT::v4i16 ||
2041         Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
2042       return SDValue();
2043   } else if (ResVT == MVT::v2i64) {
2044     if (Extract.getValueType() != MVT::v2i32 ||
2045         Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
2046       return SDValue();
2047   } else {
2048     return SDValue();
2049   }
2050 
2051   bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
2052   bool IsLow = Index == 0;
2053 
2054   unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
2055                                 : WebAssemblyISD::EXTEND_HIGH_S)
2056                        : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
2057                                 : WebAssemblyISD::EXTEND_HIGH_U);
2058 
2059   return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2060 }
2061 
2062 static SDValue
2063 performVectorConvertLowCombine(SDNode *N,
2064                                TargetLowering::DAGCombinerInfo &DCI) {
2065   auto &DAG = DCI.DAG;
2066 
2067   EVT ResVT = N->getValueType(0);
2068   if (ResVT != MVT::v2f64)
2069     return SDValue();
2070 
2071   if (N->getOpcode() == ISD::SINT_TO_FP || N->getOpcode() == ISD::UINT_TO_FP) {
2072     // Combine this:
2073     //
2074     //   (v2f64 ({s,u}int_to_fp
2075     //     (v2i32 (extract_subvector (v4i32 $x), 0))))
2076     //
2077     // into (f64x2.convert_low_i32x4_{s,u} $x).
2078     auto Extract = N->getOperand(0);
2079     if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2080       return SDValue();
2081     if (Extract.getValueType() != MVT::v2i32)
2082       return SDValue();
2083     auto Source = Extract.getOperand(0);
2084     if (Source.getValueType() != MVT::v4i32)
2085       return SDValue();
2086     auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
2087     if (IndexNode == nullptr || IndexNode->getZExtValue() != 0)
2088       return SDValue();
2089 
2090     unsigned Op = N->getOpcode() == ISD::SINT_TO_FP
2091                       ? WebAssemblyISD::CONVERT_LOW_S
2092                       : WebAssemblyISD::CONVERT_LOW_U;
2093 
2094     return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2095 
2096   } else if (N->getOpcode() == ISD::EXTRACT_SUBVECTOR) {
2097     // Combine this:
2098     //
2099     //   (v2f64 (extract_subvector
2100     //     (v4f64 ({s,u}int_to_fp (v4i32 $x))), 0))
2101     //
2102     // into (f64x2.convert_low_i32x4_{s,u} $x).
2103     auto IntToFP = N->getOperand(0);
2104     if (IntToFP.getOpcode() != ISD::SINT_TO_FP &&
2105         IntToFP.getOpcode() != ISD::UINT_TO_FP)
2106       return SDValue();
2107     if (IntToFP.getValueType() != MVT::v4f64)
2108       return SDValue();
2109     auto Source = IntToFP.getOperand(0);
2110     if (Source.getValueType() != MVT::v4i32)
2111       return SDValue();
2112     auto IndexNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
2113     if (IndexNode == nullptr || IndexNode->getZExtValue() != 0)
2114       return SDValue();
2115 
2116     unsigned Op = IntToFP->getOpcode() == ISD::SINT_TO_FP
2117                       ? WebAssemblyISD::CONVERT_LOW_S
2118                       : WebAssemblyISD::CONVERT_LOW_U;
2119 
2120     return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2121 
2122   } else {
2123     llvm_unreachable("unexpected opcode");
2124   }
2125 }
2126 
2127 static SDValue
2128 performVectorTruncSatLowCombine(SDNode *N,
2129                                 TargetLowering::DAGCombinerInfo &DCI) {
2130   auto &DAG = DCI.DAG;
2131   assert(N->getOpcode() == ISD::CONCAT_VECTORS);
2132 
2133   // Combine this:
2134   //
2135   //   (concat_vectors (v2i32 (fp_to_{s,u}int_sat $x, 32)), (v2i32 (splat 0)))
2136   //
2137   // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2138   EVT ResVT = N->getValueType(0);
2139   if (ResVT != MVT::v4i32)
2140     return SDValue();
2141 
2142   auto FPToInt = N->getOperand(0);
2143   auto FPToIntOp = FPToInt.getOpcode();
2144   if (FPToIntOp != ISD::FP_TO_SINT_SAT && FPToIntOp != ISD::FP_TO_UINT_SAT)
2145     return SDValue();
2146   if (FPToInt.getConstantOperandVal(1) != 32)
2147     return SDValue();
2148 
2149   auto Source = FPToInt.getOperand(0);
2150   if (Source.getValueType() != MVT::v2f64)
2151     return SDValue();
2152 
2153   auto *Splat = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
2154   APInt SplatValue, SplatUndef;
2155   unsigned SplatBitSize;
2156   bool HasAnyUndefs;
2157   if (!Splat || !Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
2158                                         HasAnyUndefs))
2159     return SDValue();
2160   if (SplatValue != 0)
2161     return SDValue();
2162 
2163   unsigned Op = FPToIntOp == ISD::FP_TO_SINT_SAT
2164                     ? WebAssemblyISD::TRUNC_SAT_ZERO_S
2165                     : WebAssemblyISD::TRUNC_SAT_ZERO_U;
2166 
2167   return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2168 }
2169 
2170 SDValue
2171 WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
2172                                              DAGCombinerInfo &DCI) const {
2173   switch (N->getOpcode()) {
2174   default:
2175     return SDValue();
2176   case ISD::VECTOR_SHUFFLE:
2177     return performVECTOR_SHUFFLECombine(N, DCI);
2178   case ISD::SIGN_EXTEND:
2179   case ISD::ZERO_EXTEND:
2180     return performVectorExtendCombine(N, DCI);
2181   case ISD::SINT_TO_FP:
2182   case ISD::UINT_TO_FP:
2183   case ISD::EXTRACT_SUBVECTOR:
2184     return performVectorConvertLowCombine(N, DCI);
2185   case ISD::CONCAT_VECTORS:
2186     return performVectorTruncSatLowCombine(N, DCI);
2187   }
2188 }
2189