1 //=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements the WebAssemblyTargetLowering class.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "WebAssemblyISelLowering.h"
15 #include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
16 #include "Utils/WebAssemblyUtilities.h"
17 #include "WebAssemblyMachineFunctionInfo.h"
18 #include "WebAssemblySubtarget.h"
19 #include "WebAssemblyTargetMachine.h"
20 #include "llvm/CodeGen/CallingConvLower.h"
21 #include "llvm/CodeGen/MachineInstrBuilder.h"
22 #include "llvm/CodeGen/MachineJumpTableInfo.h"
23 #include "llvm/CodeGen/MachineModuleInfo.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/SelectionDAG.h"
26 #include "llvm/CodeGen/SelectionDAGNodes.h"
27 #include "llvm/CodeGen/WasmEHFuncInfo.h"
28 #include "llvm/IR/DiagnosticInfo.h"
29 #include "llvm/IR/DiagnosticPrinter.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/Intrinsics.h"
32 #include "llvm/IR/IntrinsicsWebAssembly.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/MathExtras.h"
36 #include "llvm/Support/raw_ostream.h"
37 #include "llvm/Target/TargetOptions.h"
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "wasm-lower"
41 
42 WebAssemblyTargetLowering::WebAssemblyTargetLowering(
43     const TargetMachine &TM, const WebAssemblySubtarget &STI)
44     : TargetLowering(TM), Subtarget(&STI) {
45   auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
46 
47   // Booleans always contain 0 or 1.
48   setBooleanContents(ZeroOrOneBooleanContent);
49   // Except in SIMD vectors
50   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
51   // We don't know the microarchitecture here, so just reduce register pressure.
52   setSchedulingPreference(Sched::RegPressure);
53   // Tell ISel that we have a stack pointer.
54   setStackPointerRegisterToSaveRestore(
55       Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
56   // Set up the register classes.
57   addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
58   addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
59   addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
60   addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
61   if (Subtarget->hasSIMD128()) {
62     addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
63     addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
64     addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
65     addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
66     addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
67     addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
68   }
69   // Compute derived properties from the register classes.
70   computeRegisterProperties(Subtarget->getRegisterInfo());
71 
72   // Transform loads and stores to pointers in address space 1 to loads and
73   // stores to WebAssembly global variables, outside linear memory.
74   for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
75     setOperationAction(ISD::LOAD, T, Custom);
76     setOperationAction(ISD::STORE, T, Custom);
77   }
78   if (Subtarget->hasSIMD128()) {
79     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
80                    MVT::v2f64}) {
81       setOperationAction(ISD::LOAD, T, Custom);
82       setOperationAction(ISD::STORE, T, Custom);
83     }
84   }
85 
86   setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
87   setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom);
88   setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
89   setOperationAction(ISD::JumpTable, MVTPtr, Custom);
90   setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
91   setOperationAction(ISD::BRIND, MVT::Other, Custom);
92 
93   // Take the default expansion for va_arg, va_copy, and va_end. There is no
94   // default action for va_start, so we do that custom.
95   setOperationAction(ISD::VASTART, MVT::Other, Custom);
96   setOperationAction(ISD::VAARG, MVT::Other, Expand);
97   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
98   setOperationAction(ISD::VAEND, MVT::Other, Expand);
99 
100   for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
101     // Don't expand the floating-point types to constant pools.
102     setOperationAction(ISD::ConstantFP, T, Legal);
103     // Expand floating-point comparisons.
104     for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
105                     ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
106       setCondCodeAction(CC, T, Expand);
107     // Expand floating-point library function operators.
108     for (auto Op :
109          {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
110       setOperationAction(Op, T, Expand);
111     // Note supported floating-point library function operators that otherwise
112     // default to expand.
113     for (auto Op :
114          {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
115       setOperationAction(Op, T, Legal);
116     // Support minimum and maximum, which otherwise default to expand.
117     setOperationAction(ISD::FMINIMUM, T, Legal);
118     setOperationAction(ISD::FMAXIMUM, T, Legal);
119     // WebAssembly currently has no builtin f16 support.
120     setOperationAction(ISD::FP16_TO_FP, T, Expand);
121     setOperationAction(ISD::FP_TO_FP16, T, Expand);
122     setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
123     setTruncStoreAction(T, MVT::f16, Expand);
124   }
125 
126   // Expand unavailable integer operations.
127   for (auto Op :
128        {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
129         ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
130         ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
131     for (auto T : {MVT::i32, MVT::i64})
132       setOperationAction(Op, T, Expand);
133     if (Subtarget->hasSIMD128())
134       for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
135         setOperationAction(Op, T, Expand);
136   }
137 
138   if (Subtarget->hasNontrappingFPToInt())
139     for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
140       for (auto T : {MVT::i32, MVT::i64})
141         setOperationAction(Op, T, Custom);
142 
143   // SIMD-specific configuration
144   if (Subtarget->hasSIMD128()) {
145     // Hoist bitcasts out of shuffles
146     setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
147 
148     // Combine extends of extract_subvectors into widening ops
149     setTargetDAGCombine(ISD::SIGN_EXTEND);
150     setTargetDAGCombine(ISD::ZERO_EXTEND);
151 
152     // Combine int_to_fp or fp_extend of extract_vectors and vice versa into
153     // conversions ops
154     setTargetDAGCombine(ISD::SINT_TO_FP);
155     setTargetDAGCombine(ISD::UINT_TO_FP);
156     setTargetDAGCombine(ISD::FP_EXTEND);
157     setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
158 
159     // Combine fp_to_{s,u}int_sat or fp_round of concat_vectors or vice versa
160     // into conversion ops
161     setTargetDAGCombine(ISD::FP_TO_SINT_SAT);
162     setTargetDAGCombine(ISD::FP_TO_UINT_SAT);
163     setTargetDAGCombine(ISD::FP_ROUND);
164     setTargetDAGCombine(ISD::CONCAT_VECTORS);
165 
166     // Support saturating add for i8x16 and i16x8
167     for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
168       for (auto T : {MVT::v16i8, MVT::v8i16})
169         setOperationAction(Op, T, Legal);
170 
171     // Support integer abs
172     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
173       setOperationAction(ISD::ABS, T, Legal);
174 
175     // Custom lower BUILD_VECTORs to minimize number of replace_lanes
176     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
177                    MVT::v2f64})
178       setOperationAction(ISD::BUILD_VECTOR, T, Custom);
179 
180     // We have custom shuffle lowering to expose the shuffle mask
181     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
182                    MVT::v2f64})
183       setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
184 
185     // Custom lowering since wasm shifts must have a scalar shift amount
186     for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
187       for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
188         setOperationAction(Op, T, Custom);
189 
190     // Custom lower lane accesses to expand out variable indices
191     for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
192       for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
193                      MVT::v2f64})
194         setOperationAction(Op, T, Custom);
195 
196     // There is no i8x16.mul instruction
197     setOperationAction(ISD::MUL, MVT::v16i8, Expand);
198 
199     // There is no vector conditional select instruction
200     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
201                    MVT::v2f64})
202       setOperationAction(ISD::SELECT_CC, T, Expand);
203 
204     // Expand integer operations supported for scalars but not SIMD
205     for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP, ISD::SDIV, ISD::UDIV,
206                     ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
207       for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
208         setOperationAction(Op, T, Expand);
209 
210     // But we do have integer min and max operations
211     for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
212       for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
213         setOperationAction(Op, T, Legal);
214 
215     // And we have popcnt for i8x16
216     setOperationAction(ISD::CTPOP, MVT::v16i8, Legal);
217 
218     // Expand float operations supported for scalars but not SIMD
219     for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
220                     ISD::FEXP, ISD::FEXP2, ISD::FRINT})
221       for (auto T : {MVT::v4f32, MVT::v2f64})
222         setOperationAction(Op, T, Expand);
223 
224     // Unsigned comparison operations are unavailable for i64x2 vectors.
225     for (auto CC : {ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE})
226       setCondCodeAction(CC, MVT::v2i64, Custom);
227 
228     // 64x2 conversions are not in the spec
229     for (auto Op :
230          {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
231       for (auto T : {MVT::v2i64, MVT::v2f64})
232         setOperationAction(Op, T, Expand);
233 
234     // But saturating fp_to_int converstions are
235     for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
236       setOperationAction(Op, MVT::v4i32, Custom);
237   }
238 
239   // As a special case, these operators use the type to mean the type to
240   // sign-extend from.
241   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
242   if (!Subtarget->hasSignExt()) {
243     // Sign extends are legal only when extending a vector extract
244     auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
245     for (auto T : {MVT::i8, MVT::i16, MVT::i32})
246       setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
247   }
248   for (auto T : MVT::integer_fixedlen_vector_valuetypes())
249     setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);
250 
251   // Dynamic stack allocation: use the default expansion.
252   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
253   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
254   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);
255 
256   setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
257   setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
258   setOperationAction(ISD::CopyToReg, MVT::Other, Custom);
259 
260   // Expand these forms; we pattern-match the forms that we can handle in isel.
261   for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
262     for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
263       setOperationAction(Op, T, Expand);
264 
265   // We have custom switch handling.
266   setOperationAction(ISD::BR_JT, MVT::Other, Custom);
267 
268   // WebAssembly doesn't have:
269   //  - Floating-point extending loads.
270   //  - Floating-point truncating stores.
271   //  - i1 extending loads.
272   //  - truncating SIMD stores and most extending loads
273   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
274   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
275   for (auto T : MVT::integer_valuetypes())
276     for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
277       setLoadExtAction(Ext, T, MVT::i1, Promote);
278   if (Subtarget->hasSIMD128()) {
279     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
280                    MVT::v2f64}) {
281       for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
282         if (MVT(T) != MemT) {
283           setTruncStoreAction(T, MemT, Expand);
284           for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
285             setLoadExtAction(Ext, T, MemT, Expand);
286         }
287       }
288     }
289     // But some vector extending loads are legal
290     for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
291       setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
292       setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
293       setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
294     }
295     // And some truncating stores are legal as well
296     setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
297     setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
298   }
299 
300   // Don't do anything clever with build_pairs
301   setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
302 
303   // Trap lowers to wasm unreachable
304   setOperationAction(ISD::TRAP, MVT::Other, Legal);
305   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
306 
307   // Exception handling intrinsics
308   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
309   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
310   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
311 
312   setMaxAtomicSizeInBitsSupported(64);
313 
314   // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
315   // consistent with the f64 and f128 names.
316   setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
317   setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
318 
319   // Define the emscripten name for return address helper.
320   // TODO: when implementing other Wasm backends, make this generic or only do
321   // this on emscripten depending on what they end up doing.
322   setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");
323 
324   // Always convert switches to br_tables unless there is only one case, which
325   // is equivalent to a simple branch. This reduces code size for wasm, and we
326   // defer possible jump table optimizations to the VM.
327   setMinimumJumpTableEntries(2);
328 }
329 
330 TargetLowering::AtomicExpansionKind
331 WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
332   // We have wasm instructions for these
333   switch (AI->getOperation()) {
334   case AtomicRMWInst::Add:
335   case AtomicRMWInst::Sub:
336   case AtomicRMWInst::And:
337   case AtomicRMWInst::Or:
338   case AtomicRMWInst::Xor:
339   case AtomicRMWInst::Xchg:
340     return AtomicExpansionKind::None;
341   default:
342     break;
343   }
344   return AtomicExpansionKind::CmpXChg;
345 }
346 
347 bool WebAssemblyTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
348   // Implementation copied from X86TargetLowering.
349   unsigned Opc = VecOp.getOpcode();
350 
351   // Assume target opcodes can't be scalarized.
352   // TODO - do we have any exceptions?
353   if (Opc >= ISD::BUILTIN_OP_END)
354     return false;
355 
356   // If the vector op is not supported, try to convert to scalar.
357   EVT VecVT = VecOp.getValueType();
358   if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
359     return true;
360 
361   // If the vector op is supported, but the scalar op is not, the transform may
362   // not be worthwhile.
363   EVT ScalarVT = VecVT.getScalarType();
364   return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
365 }
366 
367 FastISel *WebAssemblyTargetLowering::createFastISel(
368     FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
369   return WebAssembly::createFastISel(FuncInfo, LibInfo);
370 }
371 
372 MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
373                                                       EVT VT) const {
374   unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
375   if (BitWidth > 1 && BitWidth < 8)
376     BitWidth = 8;
377 
378   if (BitWidth > 64) {
379     // The shift will be lowered to a libcall, and compiler-rt libcalls expect
380     // the count to be an i32.
381     BitWidth = 32;
382     assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
383            "32-bit shift counts ought to be enough for anyone");
384   }
385 
386   MVT Result = MVT::getIntegerVT(BitWidth);
387   assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
388          "Unable to represent scalar shift amount type");
389   return Result;
390 }
391 
392 // Lower an fp-to-int conversion operator from the LLVM opcode, which has an
393 // undefined result on invalid/overflow, to the WebAssembly opcode, which
394 // traps on invalid/overflow.
395 static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
396                                        MachineBasicBlock *BB,
397                                        const TargetInstrInfo &TII,
398                                        bool IsUnsigned, bool Int64,
399                                        bool Float64, unsigned LoweredOpcode) {
400   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
401 
402   Register OutReg = MI.getOperand(0).getReg();
403   Register InReg = MI.getOperand(1).getReg();
404 
405   unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
406   unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
407   unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
408   unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
409   unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
410   unsigned Eqz = WebAssembly::EQZ_I32;
411   unsigned And = WebAssembly::AND_I32;
412   int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
413   int64_t Substitute = IsUnsigned ? 0 : Limit;
414   double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
415   auto &Context = BB->getParent()->getFunction().getContext();
416   Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
417 
418   const BasicBlock *LLVMBB = BB->getBasicBlock();
419   MachineFunction *F = BB->getParent();
420   MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
421   MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
422   MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
423 
424   MachineFunction::iterator It = ++BB->getIterator();
425   F->insert(It, FalseMBB);
426   F->insert(It, TrueMBB);
427   F->insert(It, DoneMBB);
428 
429   // Transfer the remainder of BB and its successor edges to DoneMBB.
430   DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
431   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
432 
433   BB->addSuccessor(TrueMBB);
434   BB->addSuccessor(FalseMBB);
435   TrueMBB->addSuccessor(DoneMBB);
436   FalseMBB->addSuccessor(DoneMBB);
437 
438   unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
439   Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
440   Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
441   CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
442   EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
443   FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
444   TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
445 
446   MI.eraseFromParent();
447   // For signed numbers, we can do a single comparison to determine whether
448   // fabs(x) is within range.
449   if (IsUnsigned) {
450     Tmp0 = InReg;
451   } else {
452     BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
453   }
454   BuildMI(BB, DL, TII.get(FConst), Tmp1)
455       .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
456   BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
457 
458   // For unsigned numbers, we have to do a separate comparison with zero.
459   if (IsUnsigned) {
460     Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
461     Register SecondCmpReg =
462         MRI.createVirtualRegister(&WebAssembly::I32RegClass);
463     Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
464     BuildMI(BB, DL, TII.get(FConst), Tmp1)
465         .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
466     BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
467     BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
468     CmpReg = AndReg;
469   }
470 
471   BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
472 
473   // Create the CFG diamond to select between doing the conversion or using
474   // the substitute value.
475   BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
476   BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
477   BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
478   BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
479   BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
480       .addReg(FalseReg)
481       .addMBB(FalseMBB)
482       .addReg(TrueReg)
483       .addMBB(TrueMBB);
484 
485   return DoneMBB;
486 }
487 
488 static MachineBasicBlock *
489 LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
490                  const WebAssemblySubtarget *Subtarget,
491                  const TargetInstrInfo &TII) {
492   MachineInstr &CallParams = *CallResults.getPrevNode();
493   assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS);
494   assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||
495          CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS);
496 
497   bool IsIndirect = CallParams.getOperand(0).isReg();
498   bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;
499 
500   unsigned CallOp;
501   if (IsIndirect && IsRetCall) {
502     CallOp = WebAssembly::RET_CALL_INDIRECT;
503   } else if (IsIndirect) {
504     CallOp = WebAssembly::CALL_INDIRECT;
505   } else if (IsRetCall) {
506     CallOp = WebAssembly::RET_CALL;
507   } else {
508     CallOp = WebAssembly::CALL;
509   }
510 
511   MachineFunction &MF = *BB->getParent();
512   const MCInstrDesc &MCID = TII.get(CallOp);
513   MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));
514 
515   // See if we must truncate the function pointer.
516   // CALL_INDIRECT takes an i32, but in wasm64 we represent function pointers
517   // as 64-bit for uniformity with other pointer types.
518   // See also: WebAssemblyFastISel::selectCall
519   if (IsIndirect && MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()) {
520     Register Reg32 =
521         MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
522     auto &FnPtr = CallParams.getOperand(0);
523     BuildMI(*BB, CallResults.getIterator(), DL,
524             TII.get(WebAssembly::I32_WRAP_I64), Reg32)
525         .addReg(FnPtr.getReg());
526     FnPtr.setReg(Reg32);
527   }
528 
529   // Move the function pointer to the end of the arguments for indirect calls
530   if (IsIndirect) {
531     auto FnPtr = CallParams.getOperand(0);
532     CallParams.RemoveOperand(0);
533     CallParams.addOperand(FnPtr);
534   }
535 
536   for (auto Def : CallResults.defs())
537     MIB.add(Def);
538 
539   if (IsIndirect) {
540     // Placeholder for the type index.
541     MIB.addImm(0);
542     // The table into which this call_indirect indexes.
543     MCSymbolWasm *Table =
544         WebAssembly::getOrCreateFunctionTableSymbol(MF.getContext(), Subtarget);
545     if (Subtarget->hasReferenceTypes()) {
546       MIB.addSym(Table);
547     } else {
548       // For the MVP there is at most one table whose number is 0, but we can't
549       // write a table symbol or issue relocations.  Instead we just ensure the
550       // table is live and write a zero.
551       Table->setNoStrip();
552       MIB.addImm(0);
553     }
554   }
555 
556   for (auto Use : CallParams.uses())
557     MIB.add(Use);
558 
559   BB->insert(CallResults.getIterator(), MIB);
560   CallParams.eraseFromParent();
561   CallResults.eraseFromParent();
562 
563   return BB;
564 }
565 
566 MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
567     MachineInstr &MI, MachineBasicBlock *BB) const {
568   const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
569   DebugLoc DL = MI.getDebugLoc();
570 
571   switch (MI.getOpcode()) {
572   default:
573     llvm_unreachable("Unexpected instr type to insert");
574   case WebAssembly::FP_TO_SINT_I32_F32:
575     return LowerFPToInt(MI, DL, BB, TII, false, false, false,
576                         WebAssembly::I32_TRUNC_S_F32);
577   case WebAssembly::FP_TO_UINT_I32_F32:
578     return LowerFPToInt(MI, DL, BB, TII, true, false, false,
579                         WebAssembly::I32_TRUNC_U_F32);
580   case WebAssembly::FP_TO_SINT_I64_F32:
581     return LowerFPToInt(MI, DL, BB, TII, false, true, false,
582                         WebAssembly::I64_TRUNC_S_F32);
583   case WebAssembly::FP_TO_UINT_I64_F32:
584     return LowerFPToInt(MI, DL, BB, TII, true, true, false,
585                         WebAssembly::I64_TRUNC_U_F32);
586   case WebAssembly::FP_TO_SINT_I32_F64:
587     return LowerFPToInt(MI, DL, BB, TII, false, false, true,
588                         WebAssembly::I32_TRUNC_S_F64);
589   case WebAssembly::FP_TO_UINT_I32_F64:
590     return LowerFPToInt(MI, DL, BB, TII, true, false, true,
591                         WebAssembly::I32_TRUNC_U_F64);
592   case WebAssembly::FP_TO_SINT_I64_F64:
593     return LowerFPToInt(MI, DL, BB, TII, false, true, true,
594                         WebAssembly::I64_TRUNC_S_F64);
595   case WebAssembly::FP_TO_UINT_I64_F64:
596     return LowerFPToInt(MI, DL, BB, TII, true, true, true,
597                         WebAssembly::I64_TRUNC_U_F64);
598   case WebAssembly::CALL_RESULTS:
599   case WebAssembly::RET_CALL_RESULTS:
600     return LowerCallResults(MI, DL, BB, Subtarget, TII);
601   }
602 }
603 
604 const char *
605 WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
606   switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
607   case WebAssemblyISD::FIRST_NUMBER:
608   case WebAssemblyISD::FIRST_MEM_OPCODE:
609     break;
610 #define HANDLE_NODETYPE(NODE)                                                  \
611   case WebAssemblyISD::NODE:                                                   \
612     return "WebAssemblyISD::" #NODE;
613 #define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
614 #include "WebAssemblyISD.def"
615 #undef HANDLE_MEM_NODETYPE
616 #undef HANDLE_NODETYPE
617   }
618   return nullptr;
619 }
620 
621 std::pair<unsigned, const TargetRegisterClass *>
622 WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
623     const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
624   // First, see if this is a constraint that directly corresponds to a
625   // WebAssembly register class.
626   if (Constraint.size() == 1) {
627     switch (Constraint[0]) {
628     case 'r':
629       assert(VT != MVT::iPTR && "Pointer MVT not expected here");
630       if (Subtarget->hasSIMD128() && VT.isVector()) {
631         if (VT.getSizeInBits() == 128)
632           return std::make_pair(0U, &WebAssembly::V128RegClass);
633       }
634       if (VT.isInteger() && !VT.isVector()) {
635         if (VT.getSizeInBits() <= 32)
636           return std::make_pair(0U, &WebAssembly::I32RegClass);
637         if (VT.getSizeInBits() <= 64)
638           return std::make_pair(0U, &WebAssembly::I64RegClass);
639       }
640       if (VT.isFloatingPoint() && !VT.isVector()) {
641         switch (VT.getSizeInBits()) {
642         case 32:
643           return std::make_pair(0U, &WebAssembly::F32RegClass);
644         case 64:
645           return std::make_pair(0U, &WebAssembly::F64RegClass);
646         default:
647           break;
648         }
649       }
650       break;
651     default:
652       break;
653     }
654   }
655 
656   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
657 }
658 
659 bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
660   // Assume ctz is a relatively cheap operation.
661   return true;
662 }
663 
664 bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
665   // Assume clz is a relatively cheap operation.
666   return true;
667 }
668 
669 bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
670                                                       const AddrMode &AM,
671                                                       Type *Ty, unsigned AS,
672                                                       Instruction *I) const {
673   // WebAssembly offsets are added as unsigned without wrapping. The
674   // isLegalAddressingMode gives us no way to determine if wrapping could be
675   // happening, so we approximate this by accepting only non-negative offsets.
676   if (AM.BaseOffs < 0)
677     return false;
678 
679   // WebAssembly has no scale register operands.
680   if (AM.Scale != 0)
681     return false;
682 
683   // Everything else is legal.
684   return true;
685 }
686 
687 bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
688     EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
689     MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
690   // WebAssembly supports unaligned accesses, though it should be declared
691   // with the p2align attribute on loads and stores which do so, and there
692   // may be a performance impact. We tell LLVM they're "fast" because
693   // for the kinds of things that LLVM uses this for (merging adjacent stores
694   // of constants, etc.), WebAssembly implementations will either want the
695   // unaligned access or they'll split anyway.
696   if (Fast)
697     *Fast = true;
698   return true;
699 }
700 
701 bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
702                                               AttributeList Attr) const {
703   // The current thinking is that wasm engines will perform this optimization,
704   // so we can save on code size.
705   return true;
706 }
707 
708 bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
709   EVT ExtT = ExtVal.getValueType();
710   EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
711   return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
712          (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
713          (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
714 }
715 
716 EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
717                                                   LLVMContext &C,
718                                                   EVT VT) const {
719   if (VT.isVector())
720     return VT.changeVectorElementTypeToInteger();
721 
722   // So far, all branch instructions in Wasm take an I32 condition.
723   // The default TargetLowering::getSetCCResultType returns the pointer size,
724   // which would be useful to reduce instruction counts when testing
725   // against 64-bit pointers/values if at some point Wasm supports that.
726   return EVT::getIntegerVT(C, 32);
727 }
728 
729 bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
730                                                    const CallInst &I,
731                                                    MachineFunction &MF,
732                                                    unsigned Intrinsic) const {
733   switch (Intrinsic) {
734   case Intrinsic::wasm_memory_atomic_notify:
735     Info.opc = ISD::INTRINSIC_W_CHAIN;
736     Info.memVT = MVT::i32;
737     Info.ptrVal = I.getArgOperand(0);
738     Info.offset = 0;
739     Info.align = Align(4);
740     // atomic.notify instruction does not really load the memory specified with
741     // this argument, but MachineMemOperand should either be load or store, so
742     // we set this to a load.
743     // FIXME Volatile isn't really correct, but currently all LLVM atomic
744     // instructions are treated as volatiles in the backend, so we should be
745     // consistent. The same applies for wasm_atomic_wait intrinsics too.
746     Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
747     return true;
748   case Intrinsic::wasm_memory_atomic_wait32:
749     Info.opc = ISD::INTRINSIC_W_CHAIN;
750     Info.memVT = MVT::i32;
751     Info.ptrVal = I.getArgOperand(0);
752     Info.offset = 0;
753     Info.align = Align(4);
754     Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
755     return true;
756   case Intrinsic::wasm_memory_atomic_wait64:
757     Info.opc = ISD::INTRINSIC_W_CHAIN;
758     Info.memVT = MVT::i64;
759     Info.ptrVal = I.getArgOperand(0);
760     Info.offset = 0;
761     Info.align = Align(8);
762     Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
763     return true;
764   default:
765     return false;
766   }
767 }
768 
769 //===----------------------------------------------------------------------===//
770 // WebAssembly Lowering private implementation.
771 //===----------------------------------------------------------------------===//
772 
773 //===----------------------------------------------------------------------===//
774 // Lowering Code
775 //===----------------------------------------------------------------------===//
776 
777 static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
778   MachineFunction &MF = DAG.getMachineFunction();
779   DAG.getContext()->diagnose(
780       DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
781 }
782 
783 // Test whether the given calling convention is supported.
784 static bool callingConvSupported(CallingConv::ID CallConv) {
785   // We currently support the language-independent target-independent
786   // conventions. We don't yet have a way to annotate calls with properties like
787   // "cold", and we don't have any call-clobbered registers, so these are mostly
788   // all handled the same.
789   return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
790          CallConv == CallingConv::Cold ||
791          CallConv == CallingConv::PreserveMost ||
792          CallConv == CallingConv::PreserveAll ||
793          CallConv == CallingConv::CXX_FAST_TLS ||
794          CallConv == CallingConv::WASM_EmscriptenInvoke ||
795          CallConv == CallingConv::Swift;
796 }
797 
798 SDValue
799 WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
800                                      SmallVectorImpl<SDValue> &InVals) const {
801   SelectionDAG &DAG = CLI.DAG;
802   SDLoc DL = CLI.DL;
803   SDValue Chain = CLI.Chain;
804   SDValue Callee = CLI.Callee;
805   MachineFunction &MF = DAG.getMachineFunction();
806   auto Layout = MF.getDataLayout();
807 
808   CallingConv::ID CallConv = CLI.CallConv;
809   if (!callingConvSupported(CallConv))
810     fail(DL, DAG,
811          "WebAssembly doesn't support language-specific or target-specific "
812          "calling conventions yet");
813   if (CLI.IsPatchPoint)
814     fail(DL, DAG, "WebAssembly doesn't support patch point yet");
815 
816   if (CLI.IsTailCall) {
817     auto NoTail = [&](const char *Msg) {
818       if (CLI.CB && CLI.CB->isMustTailCall())
819         fail(DL, DAG, Msg);
820       CLI.IsTailCall = false;
821     };
822 
823     if (!Subtarget->hasTailCall())
824       NoTail("WebAssembly 'tail-call' feature not enabled");
825 
826     // Varargs calls cannot be tail calls because the buffer is on the stack
827     if (CLI.IsVarArg)
828       NoTail("WebAssembly does not support varargs tail calls");
829 
830     // Do not tail call unless caller and callee return types match
831     const Function &F = MF.getFunction();
832     const TargetMachine &TM = getTargetMachine();
833     Type *RetTy = F.getReturnType();
834     SmallVector<MVT, 4> CallerRetTys;
835     SmallVector<MVT, 4> CalleeRetTys;
836     computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
837     computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
838     bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
839                       std::equal(CallerRetTys.begin(), CallerRetTys.end(),
840                                  CalleeRetTys.begin());
841     if (!TypesMatch)
842       NoTail("WebAssembly tail call requires caller and callee return types to "
843              "match");
844 
845     // If pointers to local stack values are passed, we cannot tail call
846     if (CLI.CB) {
847       for (auto &Arg : CLI.CB->args()) {
848         Value *Val = Arg.get();
849         // Trace the value back through pointer operations
850         while (true) {
851           Value *Src = Val->stripPointerCastsAndAliases();
852           if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
853             Src = GEP->getPointerOperand();
854           if (Val == Src)
855             break;
856           Val = Src;
857         }
858         if (isa<AllocaInst>(Val)) {
859           NoTail(
860               "WebAssembly does not support tail calling with stack arguments");
861           break;
862         }
863       }
864     }
865   }
866 
867   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
868   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
869   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
870 
871   // The generic code may have added an sret argument. If we're lowering an
872   // invoke function, the ABI requires that the function pointer be the first
873   // argument, so we may have to swap the arguments.
874   if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
875       Outs[0].Flags.isSRet()) {
876     std::swap(Outs[0], Outs[1]);
877     std::swap(OutVals[0], OutVals[1]);
878   }
879 
880   bool HasSwiftSelfArg = false;
881   bool HasSwiftErrorArg = false;
882   unsigned NumFixedArgs = 0;
883   for (unsigned I = 0; I < Outs.size(); ++I) {
884     const ISD::OutputArg &Out = Outs[I];
885     SDValue &OutVal = OutVals[I];
886     HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
887     HasSwiftErrorArg |= Out.Flags.isSwiftError();
888     if (Out.Flags.isNest())
889       fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
890     if (Out.Flags.isInAlloca())
891       fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
892     if (Out.Flags.isInConsecutiveRegs())
893       fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
894     if (Out.Flags.isInConsecutiveRegsLast())
895       fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
896     if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
897       auto &MFI = MF.getFrameInfo();
898       int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
899                                      Out.Flags.getNonZeroByValAlign(),
900                                      /*isSS=*/false);
901       SDValue SizeNode =
902           DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
903       SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
904       Chain = DAG.getMemcpy(
905           Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getNonZeroByValAlign(),
906           /*isVolatile*/ false, /*AlwaysInline=*/false,
907           /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
908       OutVal = FINode;
909     }
910     // Count the number of fixed args *after* legalization.
911     NumFixedArgs += Out.IsFixed;
912   }
913 
914   bool IsVarArg = CLI.IsVarArg;
915   auto PtrVT = getPointerTy(Layout);
916 
917   // For swiftcc, emit additional swiftself and swifterror arguments
918   // if there aren't. These additional arguments are also added for callee
919   // signature They are necessary to match callee and caller signature for
920   // indirect call.
921   if (CallConv == CallingConv::Swift) {
922     if (!HasSwiftSelfArg) {
923       NumFixedArgs++;
924       ISD::OutputArg Arg;
925       Arg.Flags.setSwiftSelf();
926       CLI.Outs.push_back(Arg);
927       SDValue ArgVal = DAG.getUNDEF(PtrVT);
928       CLI.OutVals.push_back(ArgVal);
929     }
930     if (!HasSwiftErrorArg) {
931       NumFixedArgs++;
932       ISD::OutputArg Arg;
933       Arg.Flags.setSwiftError();
934       CLI.Outs.push_back(Arg);
935       SDValue ArgVal = DAG.getUNDEF(PtrVT);
936       CLI.OutVals.push_back(ArgVal);
937     }
938   }
939 
940   // Analyze operands of the call, assigning locations to each operand.
941   SmallVector<CCValAssign, 16> ArgLocs;
942   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
943 
944   if (IsVarArg) {
945     // Outgoing non-fixed arguments are placed in a buffer. First
946     // compute their offsets and the total amount of buffer space needed.
947     for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
948       const ISD::OutputArg &Out = Outs[I];
949       SDValue &Arg = OutVals[I];
950       EVT VT = Arg.getValueType();
951       assert(VT != MVT::iPTR && "Legalized args should be concrete");
952       Type *Ty = VT.getTypeForEVT(*DAG.getContext());
953       Align Alignment =
954           std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
955       unsigned Offset =
956           CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
957       CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
958                                         Offset, VT.getSimpleVT(),
959                                         CCValAssign::Full));
960     }
961   }
962 
963   unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
964 
965   SDValue FINode;
966   if (IsVarArg && NumBytes) {
967     // For non-fixed arguments, next emit stores to store the argument values
968     // to the stack buffer at the offsets computed above.
969     int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
970                                                  Layout.getStackAlignment(),
971                                                  /*isSS=*/false);
972     unsigned ValNo = 0;
973     SmallVector<SDValue, 8> Chains;
974     for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
975       assert(ArgLocs[ValNo].getValNo() == ValNo &&
976              "ArgLocs should remain in order and only hold varargs args");
977       unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
978       FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
979       SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
980                                 DAG.getConstant(Offset, DL, PtrVT));
981       Chains.push_back(
982           DAG.getStore(Chain, DL, Arg, Add,
983                        MachinePointerInfo::getFixedStack(MF, FI, Offset)));
984     }
985     if (!Chains.empty())
986       Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
987   } else if (IsVarArg) {
988     FINode = DAG.getIntPtrConstant(0, DL);
989   }
990 
991   if (Callee->getOpcode() == ISD::GlobalAddress) {
992     // If the callee is a GlobalAddress node (quite common, every direct call
993     // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
994     // doesn't at MO_GOT which is not needed for direct calls.
995     GlobalAddressSDNode* GA = cast<GlobalAddressSDNode>(Callee);
996     Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
997                                         getPointerTy(DAG.getDataLayout()),
998                                         GA->getOffset());
999     Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
1000                          getPointerTy(DAG.getDataLayout()), Callee);
1001   }
1002 
1003   // Compute the operands for the CALLn node.
1004   SmallVector<SDValue, 16> Ops;
1005   Ops.push_back(Chain);
1006   Ops.push_back(Callee);
1007 
1008   // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
1009   // isn't reliable.
1010   Ops.append(OutVals.begin(),
1011              IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
1012   // Add a pointer to the vararg buffer.
1013   if (IsVarArg)
1014     Ops.push_back(FINode);
1015 
1016   SmallVector<EVT, 8> InTys;
1017   for (const auto &In : Ins) {
1018     assert(!In.Flags.isByVal() && "byval is not valid for return values");
1019     assert(!In.Flags.isNest() && "nest is not valid for return values");
1020     if (In.Flags.isInAlloca())
1021       fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
1022     if (In.Flags.isInConsecutiveRegs())
1023       fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
1024     if (In.Flags.isInConsecutiveRegsLast())
1025       fail(DL, DAG,
1026            "WebAssembly hasn't implemented cons regs last return values");
1027     // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
1028     // registers.
1029     InTys.push_back(In.VT);
1030   }
1031 
1032   if (CLI.IsTailCall) {
1033     // ret_calls do not return values to the current frame
1034     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1035     return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
1036   }
1037 
1038   InTys.push_back(MVT::Other);
1039   SDVTList InTyList = DAG.getVTList(InTys);
1040   SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);
1041 
1042   for (size_t I = 0; I < Ins.size(); ++I)
1043     InVals.push_back(Res.getValue(I));
1044 
1045   // Return the chain
1046   return Res.getValue(Ins.size());
1047 }
1048 
1049 bool WebAssemblyTargetLowering::CanLowerReturn(
1050     CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
1051     const SmallVectorImpl<ISD::OutputArg> &Outs,
1052     LLVMContext & /*Context*/) const {
1053   // WebAssembly can only handle returning tuples with multivalue enabled
1054   return Subtarget->hasMultivalue() || Outs.size() <= 1;
1055 }
1056 
1057 SDValue WebAssemblyTargetLowering::LowerReturn(
1058     SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
1059     const SmallVectorImpl<ISD::OutputArg> &Outs,
1060     const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
1061     SelectionDAG &DAG) const {
1062   assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
1063          "MVP WebAssembly can only return up to one value");
1064   if (!callingConvSupported(CallConv))
1065     fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
1066 
1067   SmallVector<SDValue, 4> RetOps(1, Chain);
1068   RetOps.append(OutVals.begin(), OutVals.end());
1069   Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);
1070 
1071   // Record the number and types of the return values.
1072   for (const ISD::OutputArg &Out : Outs) {
1073     assert(!Out.Flags.isByVal() && "byval is not valid for return values");
1074     assert(!Out.Flags.isNest() && "nest is not valid for return values");
1075     assert(Out.IsFixed && "non-fixed return value is not valid");
1076     if (Out.Flags.isInAlloca())
1077       fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
1078     if (Out.Flags.isInConsecutiveRegs())
1079       fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
1080     if (Out.Flags.isInConsecutiveRegsLast())
1081       fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
1082   }
1083 
1084   return Chain;
1085 }
1086 
1087 SDValue WebAssemblyTargetLowering::LowerFormalArguments(
1088     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1089     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1090     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1091   if (!callingConvSupported(CallConv))
1092     fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
1093 
1094   MachineFunction &MF = DAG.getMachineFunction();
1095   auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
1096 
1097   // Set up the incoming ARGUMENTS value, which serves to represent the liveness
1098   // of the incoming values before they're represented by virtual registers.
1099   MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);
1100 
1101   bool HasSwiftErrorArg = false;
1102   bool HasSwiftSelfArg = false;
1103   for (const ISD::InputArg &In : Ins) {
1104     HasSwiftSelfArg |= In.Flags.isSwiftSelf();
1105     HasSwiftErrorArg |= In.Flags.isSwiftError();
1106     if (In.Flags.isInAlloca())
1107       fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
1108     if (In.Flags.isNest())
1109       fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
1110     if (In.Flags.isInConsecutiveRegs())
1111       fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
1112     if (In.Flags.isInConsecutiveRegsLast())
1113       fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
1114     // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
1115     // registers.
1116     InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
1117                                            DAG.getTargetConstant(InVals.size(),
1118                                                                  DL, MVT::i32))
1119                              : DAG.getUNDEF(In.VT));
1120 
1121     // Record the number and types of arguments.
1122     MFI->addParam(In.VT);
1123   }
1124 
1125   // For swiftcc, emit additional swiftself and swifterror arguments
1126   // if there aren't. These additional arguments are also added for callee
1127   // signature They are necessary to match callee and caller signature for
1128   // indirect call.
1129   auto PtrVT = getPointerTy(MF.getDataLayout());
1130   if (CallConv == CallingConv::Swift) {
1131     if (!HasSwiftSelfArg) {
1132       MFI->addParam(PtrVT);
1133     }
1134     if (!HasSwiftErrorArg) {
1135       MFI->addParam(PtrVT);
1136     }
1137   }
1138   // Varargs are copied into a buffer allocated by the caller, and a pointer to
1139   // the buffer is passed as an argument.
1140   if (IsVarArg) {
1141     MVT PtrVT = getPointerTy(MF.getDataLayout());
1142     Register VarargVreg =
1143         MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
1144     MFI->setVarargBufferVreg(VarargVreg);
1145     Chain = DAG.getCopyToReg(
1146         Chain, DL, VarargVreg,
1147         DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
1148                     DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
1149     MFI->addParam(PtrVT);
1150   }
1151 
1152   // Record the number and types of arguments and results.
1153   SmallVector<MVT, 4> Params;
1154   SmallVector<MVT, 4> Results;
1155   computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(),
1156                       MF.getFunction(), DAG.getTarget(), Params, Results);
1157   for (MVT VT : Results)
1158     MFI->addResult(VT);
1159   // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
1160   // the param logic here with ComputeSignatureVTs
1161   assert(MFI->getParams().size() == Params.size() &&
1162          std::equal(MFI->getParams().begin(), MFI->getParams().end(),
1163                     Params.begin()));
1164 
1165   return Chain;
1166 }
1167 
1168 void WebAssemblyTargetLowering::ReplaceNodeResults(
1169     SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
1170   switch (N->getOpcode()) {
1171   case ISD::SIGN_EXTEND_INREG:
1172     // Do not add any results, signifying that N should not be custom lowered
1173     // after all. This happens because simd128 turns on custom lowering for
1174     // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
1175     // illegal type.
1176     break;
1177   default:
1178     llvm_unreachable(
1179         "ReplaceNodeResults not implemented for this op for WebAssembly!");
1180   }
1181 }
1182 
1183 //===----------------------------------------------------------------------===//
1184 //  Custom lowering hooks.
1185 //===----------------------------------------------------------------------===//
1186 
1187 SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
1188                                                   SelectionDAG &DAG) const {
1189   SDLoc DL(Op);
1190   switch (Op.getOpcode()) {
1191   default:
1192     llvm_unreachable("unimplemented operation lowering");
1193     return SDValue();
1194   case ISD::FrameIndex:
1195     return LowerFrameIndex(Op, DAG);
1196   case ISD::GlobalAddress:
1197     return LowerGlobalAddress(Op, DAG);
1198   case ISD::GlobalTLSAddress:
1199     return LowerGlobalTLSAddress(Op, DAG);
1200   case ISD::ExternalSymbol:
1201     return LowerExternalSymbol(Op, DAG);
1202   case ISD::JumpTable:
1203     return LowerJumpTable(Op, DAG);
1204   case ISD::BR_JT:
1205     return LowerBR_JT(Op, DAG);
1206   case ISD::VASTART:
1207     return LowerVASTART(Op, DAG);
1208   case ISD::BlockAddress:
1209   case ISD::BRIND:
1210     fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
1211     return SDValue();
1212   case ISD::RETURNADDR:
1213     return LowerRETURNADDR(Op, DAG);
1214   case ISD::FRAMEADDR:
1215     return LowerFRAMEADDR(Op, DAG);
1216   case ISD::CopyToReg:
1217     return LowerCopyToReg(Op, DAG);
1218   case ISD::EXTRACT_VECTOR_ELT:
1219   case ISD::INSERT_VECTOR_ELT:
1220     return LowerAccessVectorElement(Op, DAG);
1221   case ISD::INTRINSIC_VOID:
1222   case ISD::INTRINSIC_WO_CHAIN:
1223   case ISD::INTRINSIC_W_CHAIN:
1224     return LowerIntrinsic(Op, DAG);
1225   case ISD::SIGN_EXTEND_INREG:
1226     return LowerSIGN_EXTEND_INREG(Op, DAG);
1227   case ISD::BUILD_VECTOR:
1228     return LowerBUILD_VECTOR(Op, DAG);
1229   case ISD::VECTOR_SHUFFLE:
1230     return LowerVECTOR_SHUFFLE(Op, DAG);
1231   case ISD::SETCC:
1232     return LowerSETCC(Op, DAG);
1233   case ISD::SHL:
1234   case ISD::SRA:
1235   case ISD::SRL:
1236     return LowerShift(Op, DAG);
1237   case ISD::FP_TO_SINT_SAT:
1238   case ISD::FP_TO_UINT_SAT:
1239     return LowerFP_TO_INT_SAT(Op, DAG);
1240   case ISD::LOAD:
1241     return LowerLoad(Op, DAG);
1242   case ISD::STORE:
1243     return LowerStore(Op, DAG);
1244   }
1245 }
1246 
1247 static bool IsWebAssemblyGlobal(SDValue Op) {
1248   if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
1249     return WebAssembly::isWasmVarAddressSpace(GA->getAddressSpace());
1250 
1251   return false;
1252 }
1253 
1254 static Optional<unsigned> IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG) {
1255   const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op);
1256   if (!FI)
1257     return None;
1258 
1259   auto &MF = DAG.getMachineFunction();
1260   return WebAssemblyFrameLowering::getLocalForStackObject(MF, FI->getIndex());
1261 }
1262 
1263 SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
1264                                               SelectionDAG &DAG) const {
1265   SDLoc DL(Op);
1266   StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
1267   const SDValue &Value = SN->getValue();
1268   const SDValue &Base = SN->getBasePtr();
1269   const SDValue &Offset = SN->getOffset();
1270 
1271   if (IsWebAssemblyGlobal(Base)) {
1272     if (!Offset->isUndef())
1273       report_fatal_error("unexpected offset when storing to webassembly global",
1274                          false);
1275 
1276     SDVTList Tys = DAG.getVTList(MVT::Other);
1277     SDValue Ops[] = {SN->getChain(), Value, Base};
1278     return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_SET, DL, Tys, Ops,
1279                                    SN->getMemoryVT(), SN->getMemOperand());
1280   }
1281 
1282   if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
1283     if (!Offset->isUndef())
1284       report_fatal_error("unexpected offset when storing to webassembly local",
1285                          false);
1286 
1287     SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
1288     SDVTList Tys = DAG.getVTList(MVT::Other); // The chain.
1289     SDValue Ops[] = {SN->getChain(), Idx, Value};
1290     return DAG.getNode(WebAssemblyISD::LOCAL_SET, DL, Tys, Ops);
1291   }
1292 
1293   return Op;
1294 }
1295 
1296 SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
1297                                              SelectionDAG &DAG) const {
1298   SDLoc DL(Op);
1299   LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
1300   const SDValue &Base = LN->getBasePtr();
1301   const SDValue &Offset = LN->getOffset();
1302 
1303   if (IsWebAssemblyGlobal(Base)) {
1304     if (!Offset->isUndef())
1305       report_fatal_error(
1306           "unexpected offset when loading from webassembly global", false);
1307 
1308     SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other);
1309     SDValue Ops[] = {LN->getChain(), Base};
1310     return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops,
1311                                    LN->getMemoryVT(), LN->getMemOperand());
1312   }
1313 
1314   if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
1315     if (!Offset->isUndef())
1316       report_fatal_error(
1317           "unexpected offset when loading from webassembly local", false);
1318 
1319     SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
1320     EVT LocalVT = LN->getValueType(0);
1321     SDValue LocalGet = DAG.getNode(WebAssemblyISD::LOCAL_GET, DL, LocalVT,
1322                                    {LN->getChain(), Idx});
1323     SDValue Result = DAG.getMergeValues({LocalGet, LN->getChain()}, DL);
1324     assert(Result->getNumValues() == 2 && "Loads must carry a chain!");
1325     return Result;
1326   }
1327 
1328   return Op;
1329 }
1330 
1331 SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
1332                                                   SelectionDAG &DAG) const {
1333   SDValue Src = Op.getOperand(2);
1334   if (isa<FrameIndexSDNode>(Src.getNode())) {
1335     // CopyToReg nodes don't support FrameIndex operands. Other targets select
1336     // the FI to some LEA-like instruction, but since we don't have that, we
1337     // need to insert some kind of instruction that can take an FI operand and
1338     // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
1339     // local.copy between Op and its FI operand.
1340     SDValue Chain = Op.getOperand(0);
1341     SDLoc DL(Op);
1342     unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
1343     EVT VT = Src.getValueType();
1344     SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
1345                                                    : WebAssembly::COPY_I64,
1346                                     DL, VT, Src),
1347                  0);
1348     return Op.getNode()->getNumValues() == 1
1349                ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
1350                : DAG.getCopyToReg(Chain, DL, Reg, Copy,
1351                                   Op.getNumOperands() == 4 ? Op.getOperand(3)
1352                                                            : SDValue());
1353   }
1354   return SDValue();
1355 }
1356 
1357 SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
1358                                                    SelectionDAG &DAG) const {
1359   int FI = cast<FrameIndexSDNode>(Op)->getIndex();
1360   return DAG.getTargetFrameIndex(FI, Op.getValueType());
1361 }
1362 
1363 SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
1364                                                    SelectionDAG &DAG) const {
1365   SDLoc DL(Op);
1366 
1367   if (!Subtarget->getTargetTriple().isOSEmscripten()) {
1368     fail(DL, DAG,
1369          "Non-Emscripten WebAssembly hasn't implemented "
1370          "__builtin_return_address");
1371     return SDValue();
1372   }
1373 
1374   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1375     return SDValue();
1376 
1377   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1378   MakeLibCallOptions CallOptions;
1379   return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
1380                      {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
1381       .first;
1382 }
1383 
1384 SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
1385                                                   SelectionDAG &DAG) const {
1386   // Non-zero depths are not supported by WebAssembly currently. Use the
1387   // legalizer's default expansion, which is to return 0 (what this function is
1388   // documented to do).
1389   if (Op.getConstantOperandVal(0) > 0)
1390     return SDValue();
1391 
1392   DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
1393   EVT VT = Op.getValueType();
1394   Register FP =
1395       Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
1396   return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
1397 }
1398 
1399 SDValue
1400 WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1401                                                  SelectionDAG &DAG) const {
1402   SDLoc DL(Op);
1403   const auto *GA = cast<GlobalAddressSDNode>(Op);
1404   MVT PtrVT = getPointerTy(DAG.getDataLayout());
1405 
1406   MachineFunction &MF = DAG.getMachineFunction();
1407   if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
1408     report_fatal_error("cannot use thread-local storage without bulk memory",
1409                        false);
1410 
1411   const GlobalValue *GV = GA->getGlobal();
1412 
1413   // Currently Emscripten does not support dynamic linking with threads.
1414   // Therefore, if we have thread-local storage, only the local-exec model
1415   // is possible.
1416   // TODO: remove this and implement proper TLS models once Emscripten
1417   // supports dynamic linking with threads.
1418   if (GV->getThreadLocalMode() != GlobalValue::LocalExecTLSModel &&
1419       !Subtarget->getTargetTriple().isOSEmscripten()) {
1420     report_fatal_error("only -ftls-model=local-exec is supported for now on "
1421                        "non-Emscripten OSes: variable " +
1422                            GV->getName(),
1423                        false);
1424   }
1425 
1426   auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
1427                                      : WebAssembly::GLOBAL_GET_I32;
1428   const char *BaseName = MF.createExternalSymbolName("__tls_base");
1429 
1430   SDValue BaseAddr(
1431       DAG.getMachineNode(GlobalGet, DL, PtrVT,
1432                          DAG.getTargetExternalSymbol(BaseName, PtrVT)),
1433       0);
1434 
1435   SDValue TLSOffset = DAG.getTargetGlobalAddress(
1436       GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
1437   SDValue SymAddr = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, TLSOffset);
1438 
1439   return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
1440 }
1441 
1442 SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
1443                                                       SelectionDAG &DAG) const {
1444   SDLoc DL(Op);
1445   const auto *GA = cast<GlobalAddressSDNode>(Op);
1446   EVT VT = Op.getValueType();
1447   assert(GA->getTargetFlags() == 0 &&
1448          "Unexpected target flags on generic GlobalAddressSDNode");
1449   if (!WebAssembly::isValidAddressSpace(GA->getAddressSpace()))
1450     fail(DL, DAG, "Invalid address space for WebAssembly target");
1451 
1452   unsigned OperandFlags = 0;
1453   if (isPositionIndependent()) {
1454     const GlobalValue *GV = GA->getGlobal();
1455     if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
1456       MachineFunction &MF = DAG.getMachineFunction();
1457       MVT PtrVT = getPointerTy(MF.getDataLayout());
1458       const char *BaseName;
1459       if (GV->getValueType()->isFunctionTy()) {
1460         BaseName = MF.createExternalSymbolName("__table_base");
1461         OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
1462       }
1463       else {
1464         BaseName = MF.createExternalSymbolName("__memory_base");
1465         OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
1466       }
1467       SDValue BaseAddr =
1468           DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1469                       DAG.getTargetExternalSymbol(BaseName, PtrVT));
1470 
1471       SDValue SymAddr = DAG.getNode(
1472           WebAssemblyISD::WrapperPIC, DL, VT,
1473           DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
1474                                      OperandFlags));
1475 
1476       return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
1477     } else {
1478       OperandFlags = WebAssemblyII::MO_GOT;
1479     }
1480   }
1481 
1482   return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1483                      DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1484                                                 GA->getOffset(), OperandFlags));
1485 }
1486 
1487 SDValue
1488 WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1489                                                SelectionDAG &DAG) const {
1490   SDLoc DL(Op);
1491   const auto *ES = cast<ExternalSymbolSDNode>(Op);
1492   EVT VT = Op.getValueType();
1493   assert(ES->getTargetFlags() == 0 &&
1494          "Unexpected target flags on generic ExternalSymbolSDNode");
1495   return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1496                      DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1497 }
1498 
1499 SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1500                                                   SelectionDAG &DAG) const {
1501   // There's no need for a Wrapper node because we always incorporate a jump
1502   // table operand into a BR_TABLE instruction, rather than ever
1503   // materializing it in a register.
1504   const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1505   return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1506                                 JT->getTargetFlags());
1507 }
1508 
1509 SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1510                                               SelectionDAG &DAG) const {
1511   SDLoc DL(Op);
1512   SDValue Chain = Op.getOperand(0);
1513   const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1514   SDValue Index = Op.getOperand(2);
1515   assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
1516 
1517   SmallVector<SDValue, 8> Ops;
1518   Ops.push_back(Chain);
1519   Ops.push_back(Index);
1520 
1521   MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
1522   const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1523 
1524   // Add an operand for each case.
1525   for (auto MBB : MBBs)
1526     Ops.push_back(DAG.getBasicBlock(MBB));
1527 
1528   // Add the first MBB as a dummy default target for now. This will be replaced
1529   // with the proper default target (and the preceding range check eliminated)
1530   // if possible by WebAssemblyFixBrTableDefaults.
1531   Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
1532   return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1533 }
1534 
1535 SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1536                                                 SelectionDAG &DAG) const {
1537   SDLoc DL(Op);
1538   EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
1539 
1540   auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1541   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1542 
1543   SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1544                                     MFI->getVarargBufferVreg(), PtrVT);
1545   return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1546                       MachinePointerInfo(SV));
1547 }
1548 
1549 static SDValue getCppExceptionSymNode(SDValue Op, unsigned TagIndex,
1550                                       SelectionDAG &DAG) {
1551   // We only support C++ exceptions for now
1552   int Tag =
1553       cast<ConstantSDNode>(Op.getOperand(TagIndex).getNode())->getZExtValue();
1554   if (Tag != WebAssembly::CPP_EXCEPTION)
1555     llvm_unreachable("Invalid tag: We only support C++ exceptions for now");
1556   auto &MF = DAG.getMachineFunction();
1557   const auto &TLI = DAG.getTargetLoweringInfo();
1558   MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1559   const char *SymName = MF.createExternalSymbolName("__cpp_exception");
1560   return DAG.getNode(WebAssemblyISD::Wrapper, SDLoc(Op), PtrVT,
1561                      DAG.getTargetExternalSymbol(SymName, PtrVT));
1562 }
1563 
1564 SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1565                                                   SelectionDAG &DAG) const {
1566   MachineFunction &MF = DAG.getMachineFunction();
1567   unsigned IntNo;
1568   switch (Op.getOpcode()) {
1569   case ISD::INTRINSIC_VOID:
1570   case ISD::INTRINSIC_W_CHAIN:
1571     IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1572     break;
1573   case ISD::INTRINSIC_WO_CHAIN:
1574     IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1575     break;
1576   default:
1577     llvm_unreachable("Invalid intrinsic");
1578   }
1579   SDLoc DL(Op);
1580 
1581   switch (IntNo) {
1582   default:
1583     return SDValue(); // Don't custom lower most intrinsics.
1584 
1585   case Intrinsic::wasm_lsda: {
1586     EVT VT = Op.getValueType();
1587     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1588     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1589     auto &Context = MF.getMMI().getContext();
1590     MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
1591                                             Twine(MF.getFunctionNumber()));
1592     return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1593                        DAG.getMCSymbol(S, PtrVT));
1594   }
1595 
1596   case Intrinsic::wasm_throw: {
1597     SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
1598     return DAG.getNode(WebAssemblyISD::THROW, DL,
1599                        MVT::Other, // outchain type
1600                        {
1601                            Op.getOperand(0), // inchain
1602                            SymNode,          // exception symbol
1603                            Op.getOperand(3)  // thrown value
1604                        });
1605   }
1606 
1607   case Intrinsic::wasm_catch: {
1608     SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
1609     return DAG.getNode(WebAssemblyISD::CATCH, DL,
1610                        {
1611                            MVT::i32,  // outchain type
1612                            MVT::Other // return value
1613                        },
1614                        {
1615                            Op.getOperand(0), // inchain
1616                            SymNode           // exception symbol
1617                        });
1618   }
1619 
1620   case Intrinsic::wasm_shuffle: {
1621     // Drop in-chain and replace undefs, but otherwise pass through unchanged
1622     SDValue Ops[18];
1623     size_t OpIdx = 0;
1624     Ops[OpIdx++] = Op.getOperand(1);
1625     Ops[OpIdx++] = Op.getOperand(2);
1626     while (OpIdx < 18) {
1627       const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
1628       if (MaskIdx.isUndef() ||
1629           cast<ConstantSDNode>(MaskIdx.getNode())->getZExtValue() >= 32) {
1630         Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32);
1631       } else {
1632         Ops[OpIdx++] = MaskIdx;
1633       }
1634     }
1635     return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1636   }
1637   }
1638 }
1639 
1640 SDValue
1641 WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1642                                                   SelectionDAG &DAG) const {
1643   SDLoc DL(Op);
1644   // If sign extension operations are disabled, allow sext_inreg only if operand
1645   // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
1646   // extension operations, but allowing sext_inreg in this context lets us have
1647   // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
1648   // everywhere would be simpler in this file, but would necessitate large and
1649   // brittle patterns to undo the expansion and select extract_lane_s
1650   // instructions.
1651   assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
1652   if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1653     return SDValue();
1654 
1655   const SDValue &Extract = Op.getOperand(0);
1656   MVT VecT = Extract.getOperand(0).getSimpleValueType();
1657   if (VecT.getVectorElementType().getSizeInBits() > 32)
1658     return SDValue();
1659   MVT ExtractedLaneT =
1660       cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
1661   MVT ExtractedVecT =
1662       MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
1663   if (ExtractedVecT == VecT)
1664     return Op;
1665 
1666   // Bitcast vector to appropriate type to ensure ISel pattern coverage
1667   const SDNode *Index = Extract.getOperand(1).getNode();
1668   if (!isa<ConstantSDNode>(Index))
1669     return SDValue();
1670   unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
1671   unsigned Scale =
1672       ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
1673   assert(Scale > 1);
1674   SDValue NewIndex =
1675       DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
1676   SDValue NewExtract = DAG.getNode(
1677       ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
1678       DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
1679   return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
1680                      Op.getOperand(1));
1681 }
1682 
1683 SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
1684                                                      SelectionDAG &DAG) const {
1685   SDLoc DL(Op);
1686   const EVT VecT = Op.getValueType();
1687   const EVT LaneT = Op.getOperand(0).getValueType();
1688   const size_t Lanes = Op.getNumOperands();
1689   bool CanSwizzle = VecT == MVT::v16i8;
1690 
1691   // BUILD_VECTORs are lowered to the instruction that initializes the highest
1692   // possible number of lanes at once followed by a sequence of replace_lane
1693   // instructions to individually initialize any remaining lanes.
1694 
1695   // TODO: Tune this. For example, lanewise swizzling is very expensive, so
1696   // swizzled lanes should be given greater weight.
1697 
1698   // TODO: Investigate looping rather than always extracting/replacing specific
1699   // lanes to fill gaps.
1700 
1701   auto IsConstant = [](const SDValue &V) {
1702     return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
1703   };
1704 
1705   // Returns the source vector and index vector pair if they exist. Checks for:
1706   //   (extract_vector_elt
1707   //     $src,
1708   //     (sign_extend_inreg (extract_vector_elt $indices, $i))
1709   //   )
1710   auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
1711     auto Bail = std::make_pair(SDValue(), SDValue());
1712     if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1713       return Bail;
1714     const SDValue &SwizzleSrc = Lane->getOperand(0);
1715     const SDValue &IndexExt = Lane->getOperand(1);
1716     if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
1717       return Bail;
1718     const SDValue &Index = IndexExt->getOperand(0);
1719     if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1720       return Bail;
1721     const SDValue &SwizzleIndices = Index->getOperand(0);
1722     if (SwizzleSrc.getValueType() != MVT::v16i8 ||
1723         SwizzleIndices.getValueType() != MVT::v16i8 ||
1724         Index->getOperand(1)->getOpcode() != ISD::Constant ||
1725         Index->getConstantOperandVal(1) != I)
1726       return Bail;
1727     return std::make_pair(SwizzleSrc, SwizzleIndices);
1728   };
1729 
1730   // If the lane is extracted from another vector at a constant index, return
1731   // that vector. The source vector must not have more lanes than the dest
1732   // because the shufflevector indices are in terms of the destination lanes and
1733   // would not be able to address the smaller individual source lanes.
1734   auto GetShuffleSrc = [&](const SDValue &Lane) {
1735     if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1736       return SDValue();
1737     if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
1738       return SDValue();
1739     if (Lane->getOperand(0).getValueType().getVectorNumElements() >
1740         VecT.getVectorNumElements())
1741       return SDValue();
1742     return Lane->getOperand(0);
1743   };
1744 
1745   using ValueEntry = std::pair<SDValue, size_t>;
1746   SmallVector<ValueEntry, 16> SplatValueCounts;
1747 
1748   using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
1749   SmallVector<SwizzleEntry, 16> SwizzleCounts;
1750 
1751   using ShuffleEntry = std::pair<SDValue, size_t>;
1752   SmallVector<ShuffleEntry, 16> ShuffleCounts;
1753 
1754   auto AddCount = [](auto &Counts, const auto &Val) {
1755     auto CountIt =
1756         llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
1757     if (CountIt == Counts.end()) {
1758       Counts.emplace_back(Val, 1);
1759     } else {
1760       CountIt->second++;
1761     }
1762   };
1763 
1764   auto GetMostCommon = [](auto &Counts) {
1765     auto CommonIt =
1766         std::max_element(Counts.begin(), Counts.end(),
1767                          [](auto A, auto B) { return A.second < B.second; });
1768     assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector");
1769     return *CommonIt;
1770   };
1771 
1772   size_t NumConstantLanes = 0;
1773 
1774   // Count eligible lanes for each type of vector creation op
1775   for (size_t I = 0; I < Lanes; ++I) {
1776     const SDValue &Lane = Op->getOperand(I);
1777     if (Lane.isUndef())
1778       continue;
1779 
1780     AddCount(SplatValueCounts, Lane);
1781 
1782     if (IsConstant(Lane))
1783       NumConstantLanes++;
1784     if (auto ShuffleSrc = GetShuffleSrc(Lane))
1785       AddCount(ShuffleCounts, ShuffleSrc);
1786     if (CanSwizzle) {
1787       auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
1788       if (SwizzleSrcs.first)
1789         AddCount(SwizzleCounts, SwizzleSrcs);
1790     }
1791   }
1792 
1793   SDValue SplatValue;
1794   size_t NumSplatLanes;
1795   std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
1796 
1797   SDValue SwizzleSrc;
1798   SDValue SwizzleIndices;
1799   size_t NumSwizzleLanes = 0;
1800   if (SwizzleCounts.size())
1801     std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
1802                           NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
1803 
1804   // Shuffles can draw from up to two vectors, so find the two most common
1805   // sources.
1806   SDValue ShuffleSrc1, ShuffleSrc2;
1807   size_t NumShuffleLanes = 0;
1808   if (ShuffleCounts.size()) {
1809     std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
1810     ShuffleCounts.erase(std::remove_if(ShuffleCounts.begin(),
1811                                        ShuffleCounts.end(),
1812                                        [&](const auto &Pair) {
1813                                          return Pair.first == ShuffleSrc1;
1814                                        }),
1815                         ShuffleCounts.end());
1816   }
1817   if (ShuffleCounts.size()) {
1818     size_t AdditionalShuffleLanes;
1819     std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
1820         GetMostCommon(ShuffleCounts);
1821     NumShuffleLanes += AdditionalShuffleLanes;
1822   }
1823 
1824   // Predicate returning true if the lane is properly initialized by the
1825   // original instruction
1826   std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
1827   SDValue Result;
1828   // Prefer swizzles over shuffles over vector consts over splats
1829   if (NumSwizzleLanes >= NumShuffleLanes &&
1830       NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
1831     Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
1832                          SwizzleIndices);
1833     auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
1834     IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
1835       return Swizzled == GetSwizzleSrcs(I, Lane);
1836     };
1837   } else if (NumShuffleLanes >= NumConstantLanes &&
1838              NumShuffleLanes >= NumSplatLanes) {
1839     size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8;
1840     size_t DestLaneCount = VecT.getVectorNumElements();
1841     size_t Scale1 = 1;
1842     size_t Scale2 = 1;
1843     SDValue Src1 = ShuffleSrc1;
1844     SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT);
1845     if (Src1.getValueType() != VecT) {
1846       size_t LaneSize =
1847           Src1.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
1848       assert(LaneSize > DestLaneSize);
1849       Scale1 = LaneSize / DestLaneSize;
1850       Src1 = DAG.getBitcast(VecT, Src1);
1851     }
1852     if (Src2.getValueType() != VecT) {
1853       size_t LaneSize =
1854           Src2.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
1855       assert(LaneSize > DestLaneSize);
1856       Scale2 = LaneSize / DestLaneSize;
1857       Src2 = DAG.getBitcast(VecT, Src2);
1858     }
1859 
1860     int Mask[16];
1861     assert(DestLaneCount <= 16);
1862     for (size_t I = 0; I < DestLaneCount; ++I) {
1863       const SDValue &Lane = Op->getOperand(I);
1864       SDValue Src = GetShuffleSrc(Lane);
1865       if (Src == ShuffleSrc1) {
1866         Mask[I] = Lane->getConstantOperandVal(1) * Scale1;
1867       } else if (Src && Src == ShuffleSrc2) {
1868         Mask[I] = DestLaneCount + Lane->getConstantOperandVal(1) * Scale2;
1869       } else {
1870         Mask[I] = -1;
1871       }
1872     }
1873     ArrayRef<int> MaskRef(Mask, DestLaneCount);
1874     Result = DAG.getVectorShuffle(VecT, DL, Src1, Src2, MaskRef);
1875     IsLaneConstructed = [&](size_t, const SDValue &Lane) {
1876       auto Src = GetShuffleSrc(Lane);
1877       return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
1878     };
1879   } else if (NumConstantLanes >= NumSplatLanes) {
1880     SmallVector<SDValue, 16> ConstLanes;
1881     for (const SDValue &Lane : Op->op_values()) {
1882       if (IsConstant(Lane)) {
1883         ConstLanes.push_back(Lane);
1884       } else if (LaneT.isFloatingPoint()) {
1885         ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
1886       } else {
1887         ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
1888       }
1889     }
1890     Result = DAG.getBuildVector(VecT, DL, ConstLanes);
1891     IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
1892       return IsConstant(Lane);
1893     };
1894   } else {
1895     // Use a splat, but possibly a load_splat
1896     LoadSDNode *SplattedLoad;
1897     if ((SplattedLoad = dyn_cast<LoadSDNode>(SplatValue)) &&
1898         SplattedLoad->getMemoryVT() == VecT.getVectorElementType()) {
1899       Result = DAG.getMemIntrinsicNode(
1900           WebAssemblyISD::LOAD_SPLAT, DL, DAG.getVTList(VecT),
1901           {SplattedLoad->getChain(), SplattedLoad->getBasePtr(),
1902            SplattedLoad->getOffset()},
1903           SplattedLoad->getMemoryVT(), SplattedLoad->getMemOperand());
1904     } else {
1905       Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
1906     }
1907     IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
1908       return Lane == SplatValue;
1909     };
1910   }
1911 
1912   assert(Result);
1913   assert(IsLaneConstructed);
1914 
1915   // Add replace_lane instructions for any unhandled values
1916   for (size_t I = 0; I < Lanes; ++I) {
1917     const SDValue &Lane = Op->getOperand(I);
1918     if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
1919       Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
1920                            DAG.getConstant(I, DL, MVT::i32));
1921   }
1922 
1923   return Result;
1924 }
1925 
1926 SDValue
1927 WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
1928                                                SelectionDAG &DAG) const {
1929   SDLoc DL(Op);
1930   ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
1931   MVT VecType = Op.getOperand(0).getSimpleValueType();
1932   assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
1933   size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
1934 
1935   // Space for two vector args and sixteen mask indices
1936   SDValue Ops[18];
1937   size_t OpIdx = 0;
1938   Ops[OpIdx++] = Op.getOperand(0);
1939   Ops[OpIdx++] = Op.getOperand(1);
1940 
1941   // Expand mask indices to byte indices and materialize them as operands
1942   for (int M : Mask) {
1943     for (size_t J = 0; J < LaneBytes; ++J) {
1944       // Lower undefs (represented by -1 in mask) to zero
1945       uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
1946       Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
1947     }
1948   }
1949 
1950   return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1951 }
1952 
1953 SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
1954                                               SelectionDAG &DAG) const {
1955   SDLoc DL(Op);
1956   // The legalizer does not know how to expand the unsupported comparison modes
1957   // of i64x2 vectors, so we manually unroll them here.
1958   assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64);
1959   SmallVector<SDValue, 2> LHS, RHS;
1960   DAG.ExtractVectorElements(Op->getOperand(0), LHS);
1961   DAG.ExtractVectorElements(Op->getOperand(1), RHS);
1962   const SDValue &CC = Op->getOperand(2);
1963   auto MakeLane = [&](unsigned I) {
1964     return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
1965                        DAG.getConstant(uint64_t(-1), DL, MVT::i64),
1966                        DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
1967   };
1968   return DAG.getBuildVector(Op->getValueType(0), DL,
1969                             {MakeLane(0), MakeLane(1)});
1970 }
1971 
1972 SDValue
1973 WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
1974                                                     SelectionDAG &DAG) const {
1975   // Allow constant lane indices, expand variable lane indices
1976   SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
1977   if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
1978     return Op;
1979   else
1980     // Perform default expansion
1981     return SDValue();
1982 }
1983 
1984 static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
1985   EVT LaneT = Op.getSimpleValueType().getVectorElementType();
1986   // 32-bit and 64-bit unrolled shifts will have proper semantics
1987   if (LaneT.bitsGE(MVT::i32))
1988     return DAG.UnrollVectorOp(Op.getNode());
1989   // Otherwise mask the shift value to get proper semantics from 32-bit shift
1990   SDLoc DL(Op);
1991   size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
1992   SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
1993   unsigned ShiftOpcode = Op.getOpcode();
1994   SmallVector<SDValue, 16> ShiftedElements;
1995   DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
1996   SmallVector<SDValue, 16> ShiftElements;
1997   DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
1998   SmallVector<SDValue, 16> UnrolledOps;
1999   for (size_t i = 0; i < NumLanes; ++i) {
2000     SDValue MaskedShiftValue =
2001         DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
2002     SDValue ShiftedValue = ShiftedElements[i];
2003     if (ShiftOpcode == ISD::SRA)
2004       ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
2005                                  ShiftedValue, DAG.getValueType(LaneT));
2006     UnrolledOps.push_back(
2007         DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
2008   }
2009   return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
2010 }
2011 
2012 SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
2013                                               SelectionDAG &DAG) const {
2014   SDLoc DL(Op);
2015 
2016   // Only manually lower vector shifts
2017   assert(Op.getSimpleValueType().isVector());
2018 
2019   auto ShiftVal = DAG.getSplatValue(Op.getOperand(1));
2020   if (!ShiftVal)
2021     return unrollVectorShift(Op, DAG);
2022 
2023   // Use anyext because none of the high bits can affect the shift
2024   ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);
2025 
2026   unsigned Opcode;
2027   switch (Op.getOpcode()) {
2028   case ISD::SHL:
2029     Opcode = WebAssemblyISD::VEC_SHL;
2030     break;
2031   case ISD::SRA:
2032     Opcode = WebAssemblyISD::VEC_SHR_S;
2033     break;
2034   case ISD::SRL:
2035     Opcode = WebAssemblyISD::VEC_SHR_U;
2036     break;
2037   default:
2038     llvm_unreachable("unexpected opcode");
2039   }
2040 
2041   return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
2042 }
2043 
2044 SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
2045                                                       SelectionDAG &DAG) const {
2046   SDLoc DL(Op);
2047   EVT ResT = Op.getValueType();
2048   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2049 
2050   if ((ResT == MVT::i32 || ResT == MVT::i64) &&
2051       (SatVT == MVT::i32 || SatVT == MVT::i64))
2052     return Op;
2053 
2054   if (ResT == MVT::v4i32 && SatVT == MVT::i32)
2055     return Op;
2056 
2057   return SDValue();
2058 }
2059 
2060 //===----------------------------------------------------------------------===//
2061 //   Custom DAG combine hooks
2062 //===----------------------------------------------------------------------===//
2063 static SDValue
2064 performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2065   auto &DAG = DCI.DAG;
2066   auto Shuffle = cast<ShuffleVectorSDNode>(N);
2067 
2068   // Hoist vector bitcasts that don't change the number of lanes out of unary
2069   // shuffles, where they are less likely to get in the way of other combines.
2070   // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
2071   //  (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
2072   SDValue Bitcast = N->getOperand(0);
2073   if (Bitcast.getOpcode() != ISD::BITCAST)
2074     return SDValue();
2075   if (!N->getOperand(1).isUndef())
2076     return SDValue();
2077   SDValue CastOp = Bitcast.getOperand(0);
2078   MVT SrcType = CastOp.getSimpleValueType();
2079   MVT DstType = Bitcast.getSimpleValueType();
2080   if (!SrcType.is128BitVector() ||
2081       SrcType.getVectorNumElements() != DstType.getVectorNumElements())
2082     return SDValue();
2083   SDValue NewShuffle = DAG.getVectorShuffle(
2084       SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
2085   return DAG.getBitcast(DstType, NewShuffle);
2086 }
2087 
2088 static SDValue
2089 performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2090   auto &DAG = DCI.DAG;
2091   assert(N->getOpcode() == ISD::SIGN_EXTEND ||
2092          N->getOpcode() == ISD::ZERO_EXTEND);
2093 
2094   // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
2095   // possible before the extract_subvector can be expanded.
2096   auto Extract = N->getOperand(0);
2097   if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2098     return SDValue();
2099   auto Source = Extract.getOperand(0);
2100   auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
2101   if (IndexNode == nullptr)
2102     return SDValue();
2103   auto Index = IndexNode->getZExtValue();
2104 
2105   // Only v8i8, v4i16, and v2i32 extracts can be widened, and only if the
2106   // extracted subvector is the low or high half of its source.
2107   EVT ResVT = N->getValueType(0);
2108   if (ResVT == MVT::v8i16) {
2109     if (Extract.getValueType() != MVT::v8i8 ||
2110         Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
2111       return SDValue();
2112   } else if (ResVT == MVT::v4i32) {
2113     if (Extract.getValueType() != MVT::v4i16 ||
2114         Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
2115       return SDValue();
2116   } else if (ResVT == MVT::v2i64) {
2117     if (Extract.getValueType() != MVT::v2i32 ||
2118         Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
2119       return SDValue();
2120   } else {
2121     return SDValue();
2122   }
2123 
2124   bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
2125   bool IsLow = Index == 0;
2126 
2127   unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
2128                                 : WebAssemblyISD::EXTEND_HIGH_S)
2129                        : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
2130                                 : WebAssemblyISD::EXTEND_HIGH_U);
2131 
2132   return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2133 }
2134 
2135 static SDValue
2136 performVectorConvertLowCombine(SDNode *N,
2137                                TargetLowering::DAGCombinerInfo &DCI) {
2138   auto &DAG = DCI.DAG;
2139 
2140   EVT ResVT = N->getValueType(0);
2141   if (ResVT != MVT::v2f64)
2142     return SDValue();
2143 
2144   auto GetWasmConversionOp = [](unsigned Op) {
2145     switch (Op) {
2146     case ISD::SINT_TO_FP:
2147       return WebAssemblyISD::CONVERT_LOW_S;
2148     case ISD::UINT_TO_FP:
2149       return WebAssemblyISD::CONVERT_LOW_U;
2150     case ISD::FP_EXTEND:
2151       return WebAssemblyISD::PROMOTE_LOW;
2152     }
2153     llvm_unreachable("unexpected op");
2154   };
2155 
2156   if (N->getOpcode() == ISD::EXTRACT_SUBVECTOR) {
2157     // Combine this:
2158     //
2159     //   (v2f64 (extract_subvector
2160     //     (v4f64 ({s,u}int_to_fp (v4i32 $x))), 0))
2161     //
2162     // into (f64x2.convert_low_i32x4_{s,u} $x).
2163     //
2164     // Or this:
2165     //
2166     //  (v2f64 (extract_subvector
2167     //    (v4f64 (fp_extend (v4f32 $x))), 0))
2168     //
2169     // into (f64x2.promote_low_f32x4 $x).
2170     auto Conversion = N->getOperand(0);
2171     auto ConversionOp = Conversion.getOpcode();
2172     MVT ExpectedSourceType;
2173     switch (ConversionOp) {
2174     case ISD::SINT_TO_FP:
2175     case ISD::UINT_TO_FP:
2176       ExpectedSourceType = MVT::v4i32;
2177       break;
2178     case ISD::FP_EXTEND:
2179       ExpectedSourceType = MVT::v4f32;
2180       break;
2181     default:
2182       return SDValue();
2183     }
2184 
2185     if (Conversion.getValueType() != MVT::v4f64)
2186       return SDValue();
2187 
2188     auto Source = Conversion.getOperand(0);
2189     if (Source.getValueType() != ExpectedSourceType)
2190       return SDValue();
2191 
2192     auto IndexNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
2193     if (IndexNode == nullptr || IndexNode->getZExtValue() != 0)
2194       return SDValue();
2195 
2196     auto Op = GetWasmConversionOp(ConversionOp);
2197     return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2198   }
2199 
2200   // Combine this:
2201   //
2202   //   (v2f64 ({s,u}int_to_fp
2203   //     (v2i32 (extract_subvector (v4i32 $x), 0))))
2204   //
2205   // into (f64x2.convert_low_i32x4_{s,u} $x).
2206   //
2207   // Or this:
2208   //
2209   //   (v2f64 (fp_extend
2210   //     (v2f32 (extract_subvector (v4f32 $x), 0))))
2211   //
2212   // into (f64x2.promote_low_f32x4 $x).
2213   auto ConversionOp = N->getOpcode();
2214   MVT ExpectedExtractType;
2215   MVT ExpectedSourceType;
2216   switch (ConversionOp) {
2217   case ISD::SINT_TO_FP:
2218   case ISD::UINT_TO_FP:
2219     ExpectedExtractType = MVT::v2i32;
2220     ExpectedSourceType = MVT::v4i32;
2221     break;
2222   case ISD::FP_EXTEND:
2223     ExpectedExtractType = MVT::v2f32;
2224     ExpectedSourceType = MVT::v4f32;
2225     break;
2226   default:
2227     llvm_unreachable("unexpected opcode");
2228   }
2229 
2230   auto Extract = N->getOperand(0);
2231   if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2232     return SDValue();
2233 
2234   if (Extract.getValueType() != ExpectedExtractType)
2235     return SDValue();
2236 
2237   auto Source = Extract.getOperand(0);
2238   if (Source.getValueType() != ExpectedSourceType)
2239     return SDValue();
2240 
2241   auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
2242   if (IndexNode == nullptr || IndexNode->getZExtValue() != 0)
2243     return SDValue();
2244 
2245   unsigned Op = GetWasmConversionOp(ConversionOp);
2246   return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2247 }
2248 
2249 static SDValue
2250 performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2251   auto &DAG = DCI.DAG;
2252 
2253   auto GetWasmConversionOp = [](unsigned Op) {
2254     switch (Op) {
2255     case ISD::FP_TO_SINT_SAT:
2256       return WebAssemblyISD::TRUNC_SAT_ZERO_S;
2257     case ISD::FP_TO_UINT_SAT:
2258       return WebAssemblyISD::TRUNC_SAT_ZERO_U;
2259     case ISD::FP_ROUND:
2260       return WebAssemblyISD::DEMOTE_ZERO;
2261     }
2262     llvm_unreachable("unexpected op");
2263   };
2264 
2265   auto IsZeroSplat = [](SDValue SplatVal) {
2266     auto *Splat = dyn_cast<BuildVectorSDNode>(SplatVal.getNode());
2267     APInt SplatValue, SplatUndef;
2268     unsigned SplatBitSize;
2269     bool HasAnyUndefs;
2270     return Splat &&
2271            Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
2272                                   HasAnyUndefs) &&
2273            SplatValue == 0;
2274   };
2275 
2276   if (N->getOpcode() == ISD::CONCAT_VECTORS) {
2277     // Combine this:
2278     //
2279     //   (concat_vectors (v2i32 (fp_to_{s,u}int_sat $x, 32)), (v2i32 (splat 0)))
2280     //
2281     // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2282     //
2283     // Or this:
2284     //
2285     //   (concat_vectors (v2f32 (fp_round (v2f64 $x))), (v2f32 (splat 0)))
2286     //
2287     // into (f32x4.demote_zero_f64x2 $x).
2288     EVT ResVT;
2289     EVT ExpectedConversionType;
2290     auto Conversion = N->getOperand(0);
2291     auto ConversionOp = Conversion.getOpcode();
2292     switch (ConversionOp) {
2293     case ISD::FP_TO_SINT_SAT:
2294     case ISD::FP_TO_UINT_SAT:
2295       ResVT = MVT::v4i32;
2296       ExpectedConversionType = MVT::v2i32;
2297       break;
2298     case ISD::FP_ROUND:
2299       ResVT = MVT::v4f32;
2300       ExpectedConversionType = MVT::v2f32;
2301       break;
2302     default:
2303       return SDValue();
2304     }
2305 
2306     if (N->getValueType(0) != ResVT)
2307       return SDValue();
2308 
2309     if (Conversion.getValueType() != ExpectedConversionType)
2310       return SDValue();
2311 
2312     auto Source = Conversion.getOperand(0);
2313     if (Source.getValueType() != MVT::v2f64)
2314       return SDValue();
2315 
2316     if (!IsZeroSplat(N->getOperand(1)) ||
2317         N->getOperand(1).getValueType() != ExpectedConversionType)
2318       return SDValue();
2319 
2320     unsigned Op = GetWasmConversionOp(ConversionOp);
2321     return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2322   }
2323 
2324   // Combine this:
2325   //
2326   //   (fp_to_{s,u}int_sat (concat_vectors $x, (v2f64 (splat 0))), 32)
2327   //
2328   // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2329   //
2330   // Or this:
2331   //
2332   //   (v4f32 (fp_round (concat_vectors $x, (v2f64 (splat 0)))))
2333   //
2334   // into (f32x4.demote_zero_f64x2 $x).
2335   EVT ResVT;
2336   auto ConversionOp = N->getOpcode();
2337   switch (ConversionOp) {
2338   case ISD::FP_TO_SINT_SAT:
2339   case ISD::FP_TO_UINT_SAT:
2340     ResVT = MVT::v4i32;
2341     break;
2342   case ISD::FP_ROUND:
2343     ResVT = MVT::v4f32;
2344     break;
2345   default:
2346     llvm_unreachable("unexpected op");
2347   }
2348 
2349   if (N->getValueType(0) != ResVT)
2350     return SDValue();
2351 
2352   auto Concat = N->getOperand(0);
2353   if (Concat.getValueType() != MVT::v4f64)
2354     return SDValue();
2355 
2356   auto Source = Concat.getOperand(0);
2357   if (Source.getValueType() != MVT::v2f64)
2358     return SDValue();
2359 
2360   if (!IsZeroSplat(Concat.getOperand(1)) ||
2361       Concat.getOperand(1).getValueType() != MVT::v2f64)
2362     return SDValue();
2363 
2364   unsigned Op = GetWasmConversionOp(ConversionOp);
2365   return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2366 }
2367 
2368 SDValue
2369 WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
2370                                              DAGCombinerInfo &DCI) const {
2371   switch (N->getOpcode()) {
2372   default:
2373     return SDValue();
2374   case ISD::VECTOR_SHUFFLE:
2375     return performVECTOR_SHUFFLECombine(N, DCI);
2376   case ISD::SIGN_EXTEND:
2377   case ISD::ZERO_EXTEND:
2378     return performVectorExtendCombine(N, DCI);
2379   case ISD::SINT_TO_FP:
2380   case ISD::UINT_TO_FP:
2381   case ISD::FP_EXTEND:
2382   case ISD::EXTRACT_SUBVECTOR:
2383     return performVectorConvertLowCombine(N, DCI);
2384   case ISD::FP_TO_SINT_SAT:
2385   case ISD::FP_TO_UINT_SAT:
2386   case ISD::FP_ROUND:
2387   case ISD::CONCAT_VECTORS:
2388     return performVectorTruncZeroCombine(N, DCI);
2389   }
2390 }
2391