//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the WebAssemblyTargetLowering class.
///
//===----------------------------------------------------------------------===//

#include "WebAssemblyISelLowering.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

#define DEBUG_TYPE "wasm-lower"

WebAssemblyTargetLowering::WebAssemblyTargetLowering(
    const TargetMachine &TM, const WebAssemblySubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;

  // Booleans always contain 0 or 1.
  setBooleanContents(ZeroOrOneBooleanContent);
  // Except in SIMD vectors
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // We don't know the microarchitecture here, so just reduce register pressure.
  setSchedulingPreference(Sched::RegPressure);
  // Tell ISel that we have a stack pointer.
  setStackPointerRegisterToSaveRestore(
      Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
  // Set up the register classes.
  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
  if (Subtarget->hasSIMD128()) {
    addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
  }
  if (Subtarget->hasUnimplementedSIMD128()) {
    addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
  }
  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget->getRegisterInfo());

  setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
  setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
  setOperationAction(ISD::JumpTable, MVTPtr, Custom);
  setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
  setOperationAction(ISD::BRIND, MVT::Other, Custom);

  // Take the default expansion for va_arg, va_copy, and va_end. There is no
  // default action for va_start, so we do that custom.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
    // Don't expand the floating-point types to constant pools.
    setOperationAction(ISD::ConstantFP, T, Legal);
    // Expand floating-point comparisons.
    for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
                    ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
      setCondCodeAction(CC, T, Expand);
    // Expand floating-point library function operators.
    for (auto Op :
         {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
      setOperationAction(Op, T, Expand);
    // Note supported floating-point library function operators that otherwise
    // default to expand.
    for (auto Op :
         {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
      setOperationAction(Op, T, Legal);
    // Support minimum and maximum, which otherwise default to expand.
    setOperationAction(ISD::FMINIMUM, T, Legal);
    setOperationAction(ISD::FMAXIMUM, T, Legal);
    // WebAssembly currently has no builtin f16 support.
    setOperationAction(ISD::FP16_TO_FP, T, Expand);
    setOperationAction(ISD::FP_TO_FP16, T, Expand);
    setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
    setTruncStoreAction(T, MVT::f16, Expand);
  }

  // Expand unavailable integer operations.
  for (auto Op :
       {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
        ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
        ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
    for (auto T : {MVT::i32, MVT::i64})
      setOperationAction(Op, T, Expand);
    if (Subtarget->hasSIMD128())
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Expand);
    if (Subtarget->hasUnimplementedSIMD128())
      setOperationAction(Op, MVT::v2i64, Expand);
  }

  // SIMD-specific configuration
  if (Subtarget->hasSIMD128()) {
    // Support saturating add for i8x16 and i16x8
    for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
      for (auto T : {MVT::v16i8, MVT::v8i16})
        setOperationAction(Op, T, Legal);

    // Custom lower BUILD_VECTORs to minimize number of replace_lanes
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
      setOperationAction(ISD::BUILD_VECTOR, T, Custom);
    if (Subtarget->hasUnimplementedSIMD128())
      for (auto T : {MVT::v2i64, MVT::v2f64})
        setOperationAction(ISD::BUILD_VECTOR, T, Custom);

    // We have custom shuffle lowering to expose the shuffle mask
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
      setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
    if (Subtarget->hasUnimplementedSIMD128())
      for (auto T : {MVT::v2i64, MVT::v2f64})
        setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);

    // Custom lowering since wasm shifts must have a scalar shift amount
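    // (Illustrative sketch: a v4i32 shl by a splatted amount can be selected
    // as i32x4.shl taking the scalar amount, while non-splat vector shift
    // amounts have no single wasm instruction and end up scalarized.)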
    for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL}) {
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Custom);
      if (Subtarget->hasUnimplementedSIMD128())
        setOperationAction(Op, MVT::v2i64, Custom);
    }

    // Custom lower lane accesses to expand out variable indices
    for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT}) {
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
        setOperationAction(Op, T, Custom);
      if (Subtarget->hasUnimplementedSIMD128())
        for (auto T : {MVT::v2i64, MVT::v2f64})
          setOperationAction(Op, T, Custom);
    }

    // There is no i64x2.mul instruction
    setOperationAction(ISD::MUL, MVT::v2i64, Expand);

    // There are no vector select instructions
    for (auto Op : {ISD::VSELECT, ISD::SELECT_CC, ISD::SELECT}) {
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
        setOperationAction(Op, T, Expand);
      if (Subtarget->hasUnimplementedSIMD128())
        for (auto T : {MVT::v2i64, MVT::v2f64})
          setOperationAction(Op, T, Expand);
    }

    // Expand integer operations supported for scalars but not SIMD
    for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP, ISD::SDIV, ISD::UDIV,
                    ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR}) {
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Expand);
      if (Subtarget->hasUnimplementedSIMD128())
        setOperationAction(Op, MVT::v2i64, Expand);
    }

    // Expand float operations supported for scalars but not SIMD
    for (auto Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT,
                    ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
                    ISD::FEXP, ISD::FEXP2, ISD::FRINT}) {
      setOperationAction(Op, MVT::v4f32, Expand);
      if (Subtarget->hasUnimplementedSIMD128())
        setOperationAction(Op, MVT::v2f64, Expand);
    }

    // Expand additional SIMD ops that V8 hasn't implemented yet
    if (!Subtarget->hasUnimplementedSIMD128()) {
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
    }
  }

  // As a special case, these operators use the type to mean the type to
  // sign-extend from.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget->hasSignExt()) {
    // Sign extends are legal only when extending a vector extract
    auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
    for (auto T : {MVT::i8, MVT::i16, MVT::i32})
      setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
  }
  for (auto T : MVT::integer_vector_valuetypes())
    setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);

  // Dynamic stack allocation: use the default expansion.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);

  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
  setOperationAction(ISD::CopyToReg, MVT::Other, Custom);

  // Expand these forms; we pattern-match the forms that we can handle in isel.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
    for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
      setOperationAction(Op, T, Expand);

  // We have custom switch handling.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // WebAssembly doesn't have:
  //  - Floating-point extending loads.
  //  - Floating-point truncating stores.
  //  - i1 extending loads.
  //  - extending/truncating SIMD loads/stores
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  for (auto T : MVT::integer_valuetypes())
    for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
      setLoadExtAction(Ext, T, MVT::i1, Promote);
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
                   MVT::v2f64}) {
      for (auto MemT : MVT::vector_valuetypes()) {
        if (MVT(T) != MemT) {
          setTruncStoreAction(T, MemT, Expand);
          for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
            setLoadExtAction(Ext, T, MemT, Expand);
        }
      }
    }
  }

  // Don't do anything clever with build_pairs
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Trap lowers to wasm unreachable
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Exception handling intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  setMaxAtomicSizeInBitsSupported(64);

  if (Subtarget->hasBulkMemory()) {
    // Use memory.copy and friends over multiple loads and stores
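    // (Illustrative: with these limits set to 1, a memcpy or memset that would
    // otherwise be split into several scalar loads/stores is instead left as a
    // single call, which the bulk-memory lowering can turn into memory.copy or
    // memory.fill.)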
    MaxStoresPerMemcpy = 1;
    MaxStoresPerMemcpyOptSize = 1;
    MaxStoresPerMemmove = 1;
    MaxStoresPerMemmoveOptSize = 1;
    MaxStoresPerMemset = 1;
    MaxStoresPerMemsetOptSize = 1;
  }

  // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
  // consistent with the f64 and f128 names.
  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

  // Define the emscripten name for return address helper.
  // TODO: when implementing other WASM backends, make this generic or only do
  // this on emscripten depending on what they end up doing.
  setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");

  // Always convert switches to br_tables unless there is only one case, which
  // is equivalent to a simple branch. This reduces code size for wasm, and we
  // defer possible jump table optimizations to the VM.
  setMinimumJumpTableEntries(2);
}

TargetLowering::AtomicExpansionKind
WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // We have wasm instructions for these
  switch (AI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::Xchg:
    return AtomicExpansionKind::None;
  default:
    break;
  }
  return AtomicExpansionKind::CmpXChg;
}

FastISel *WebAssemblyTargetLowering::createFastISel(
    FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
  return WebAssembly::createFastISel(FuncInfo, LibInfo);
}

MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
                                                      EVT VT) const {
  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
  if (BitWidth > 1 && BitWidth < 8)
    BitWidth = 8;

  if (BitWidth > 64) {
    // The shift will be lowered to a libcall, and compiler-rt libcalls expect
    // the count to be an i32.
    BitWidth = 32;
    assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
           "32-bit shift counts ought to be enough for anyone");
  }
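
  // For example (illustrative): an i32 or i64 shift keeps a same-width shift
  // amount, while an i128 shift, which becomes a libcall, uses an i32 count.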

  MVT Result = MVT::getIntegerVT(BitWidth);
  assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
         "Unable to represent scalar shift amount type");
  return Result;
}

// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
// undefined result on invalid/overflow, to the WebAssembly opcode, which
// traps on invalid/overflow.
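//
// The emitted control flow is roughly (illustrative sketch for a signed i32
// result from an f32 input):
//
//   Tmp0   = f32.abs %in                 ; skipped for unsigned inputs
//   CmpReg = f32.lt %Tmp0, 0x1p31        ; is |x| within range?
//   EqzReg = i32.eqz %CmpReg
//   br_if %EqzReg, TrueMBB               ; out of range, use substitute
// FalseMBB:
//   FalseReg = i32.trunc_f32_s %in
//   br DoneMBB
// TrueMBB:
//   TrueReg = INT32_MIN                  ; 0 for unsigned conversions
// DoneMBB:
//   %out = phi [FalseReg, FalseMBB], [TrueReg, TrueMBB]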
static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
                                       MachineBasicBlock *BB,
                                       const TargetInstrInfo &TII,
                                       bool IsUnsigned, bool Int64,
                                       bool Float64, unsigned LoweredOpcode) {
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  Register OutReg = MI.getOperand(0).getReg();
  Register InReg = MI.getOperand(1).getReg();

  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
  unsigned Eqz = WebAssembly::EQZ_I32;
  unsigned And = WebAssembly::AND_I32;
  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
  int64_t Substitute = IsUnsigned ? 0 : Limit;
  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
  auto &Context = BB->getParent()->getFunction().getContext();
  Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);

  const BasicBlock *LLVMBB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);

  MachineFunction::iterator It = ++BB->getIterator();
  F->insert(It, FalseMBB);
  F->insert(It, TrueMBB);
  F->insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(TrueMBB);
  BB->addSuccessor(FalseMBB);
  TrueMBB->addSuccessor(DoneMBB);
  FalseMBB->addSuccessor(DoneMBB);

  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));

  MI.eraseFromParent();
  // For signed numbers, we can do a single comparison to determine whether
  // fabs(x) is within range.
  if (IsUnsigned) {
    Tmp0 = InReg;
  } else {
    BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
  }
  BuildMI(BB, DL, TII.get(FConst), Tmp1)
      .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
  BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);

  // For unsigned numbers, we have to do a separate comparison with zero.
  if (IsUnsigned) {
    Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
    Register SecondCmpReg =
        MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    BuildMI(BB, DL, TII.get(FConst), Tmp1)
        .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
    BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
    BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
    CmpReg = AndReg;
  }

  BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);

  // Create the CFG diamond to select between doing the conversion or using
  // the substitute value.
  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
  BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
  BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
      .addReg(FalseReg)
      .addMBB(FalseMBB)
      .addReg(TrueReg)
      .addMBB(TrueMBB);

  return DoneMBB;
}

MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case WebAssembly::FP_TO_SINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, false, false,
                        WebAssembly::I32_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, false, false,
                        WebAssembly::I32_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, true, false,
                        WebAssembly::I64_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, true, false,
                        WebAssembly::I64_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, false, true,
                        WebAssembly::I32_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, false, true,
                        WebAssembly::I32_TRUNC_U_F64);
  case WebAssembly::FP_TO_SINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, true, true,
                        WebAssembly::I64_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, true, true,
                        WebAssembly::I64_TRUNC_U_F64);
    llvm_unreachable("Unexpected instruction to emit with custom inserter");
  }
}

const char *
WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
  case WebAssemblyISD::FIRST_NUMBER:
    break;
#define HANDLE_NODETYPE(NODE)                                                  \
  case WebAssemblyISD::NODE:                                                   \
    return "WebAssemblyISD::" #NODE;
#include "WebAssemblyISD.def"
#undef HANDLE_NODETYPE
  }
  return nullptr;
}

std::pair<unsigned, const TargetRegisterClass *>
WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // WebAssembly register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      assert(VT != MVT::iPTR && "Pointer MVT not expected here");
      if (Subtarget->hasSIMD128() && VT.isVector()) {
        if (VT.getSizeInBits() == 128)
          return std::make_pair(0U, &WebAssembly::V128RegClass);
      }
      if (VT.isInteger() && !VT.isVector()) {
        if (VT.getSizeInBits() <= 32)
          return std::make_pair(0U, &WebAssembly::I32RegClass);
        if (VT.getSizeInBits() <= 64)
          return std::make_pair(0U, &WebAssembly::I64RegClass);
      }
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
  // Assume ctz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
  // Assume clz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                      const AddrMode &AM,
                                                      Type *Ty, unsigned AS,
                                                      Instruction *I) const {
  // WebAssembly offsets are added as unsigned without wrapping. The
  // isLegalAddressingMode gives us no way to determine if wrapping could be
  // happening, so we approximate this by accepting only non-negative offsets.
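  // For example (illustrative): an address of the form (base + 16) folds into
  // a load/store offset, while (base - 8) or a scaled index like (base + 4*i)
  // is rejected by the checks below.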
  if (AM.BaseOffs < 0)
    return false;

  // WebAssembly has no scale register operands.
  if (AM.Scale != 0)
    return false;

  // Everything else is legal.
  return true;
}

bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/,
    MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
  // WebAssembly supports unaligned accesses, though it should be declared
  // with the p2align attribute on loads and stores which do so, and there
  // may be a performance impact. We tell LLVM they're "fast" because
  // for the kinds of things that LLVM uses this for (merging adjacent stores
  // of constants, etc.), WebAssembly implementations will either want the
  // unaligned access or they'll split anyway.
  if (Fast)
    *Fast = true;
  return true;
}

bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
                                              AttributeList Attr) const {
  // The current thinking is that wasm engines will perform this optimization,
  // so we can save on code size.
  return true;
}

EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
                                                  LLVMContext &C,
                                                  EVT VT) const {
  if (VT.isVector())
    return VT.changeVectorElementTypeToInteger();

  return TargetLowering::getSetCCResultType(DL, C, VT);
}

bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                   const CallInst &I,
                                                   MachineFunction &MF,
                                                   unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::wasm_atomic_notify:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    // The atomic.notify instruction does not really load the memory specified
    // by this argument, but a MachineMemOperand must be either a load or a
    // store, so we mark this as a load.
    // FIXME: Volatile isn't really correct, but currently all LLVM atomic
    // instructions are treated as volatile in the backend, so we should be
    // consistent. The same applies to the wasm_atomic_wait intrinsics too.
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_atomic_wait_i32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_atomic_wait_i64:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(8);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  default:
    return false;
  }
}

//===----------------------------------------------------------------------===//
// WebAssembly Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}

// Test whether the given calling convention is supported.
static bool callingConvSupported(CallingConv::ID CallConv) {
  // We currently support the language-independent target-independent
  // conventions. We don't yet have a way to annotate calls with properties like
  // "cold", and we don't have any call-clobbered registers, so these are mostly
  // all handled the same.
  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
         CallConv == CallingConv::Cold ||
         CallConv == CallingConv::PreserveMost ||
         CallConv == CallingConv::PreserveAll ||
         CallConv == CallingConv::CXX_FAST_TLS ||
         CallConv == CallingConv::WASM_EmscriptenInvoke;
}

SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!callingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  if (CLI.IsTailCall) {
    bool MustTail = CLI.CS && CLI.CS.isMustTailCall();
    if (Subtarget->hasTailCall() && !CLI.IsVarArg) {
      // Do not tail call unless caller and callee return types match
      const Function &F = MF.getFunction();
      const TargetMachine &TM = getTargetMachine();
      Type *RetTy = F.getReturnType();
      SmallVector<MVT, 4> CallerRetTys;
      SmallVector<MVT, 4> CalleeRetTys;
      computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
      computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
      bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
                        std::equal(CallerRetTys.begin(), CallerRetTys.end(),
                                   CalleeRetTys.begin());
      if (!TypesMatch) {
        // musttail in this case would be an LLVM IR validation failure
        assert(!MustTail);
        CLI.IsTailCall = false;
      }
    } else {
      CLI.IsTailCall = false;
      if (MustTail) {
        if (CLI.IsVarArg) {
          // The return would pop the argument buffer
          fail(DL, DAG, "WebAssembly does not support varargs tail calls");
        } else {
          fail(DL, DAG, "WebAssembly 'tail-call' feature not enabled");
        }
      }
    }
  }

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  if (Ins.size() > 1)
    fail(DL, DAG, "WebAssembly doesn't support more than 1 returned value yet");

  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;

  // The generic code may have added an sret argument. If we're lowering an
  // invoke function, the ABI requires that the function pointer be the first
  // argument, so we may have to swap the arguments.
  if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
      Outs[0].Flags.isSRet()) {
    std::swap(Outs[0], Outs[1]);
    std::swap(OutVals[0], OutVals[1]);
  }

  unsigned NumFixedArgs = 0;
  for (unsigned I = 0; I < Outs.size(); ++I) {
    const ISD::OutputArg &Out = Outs[I];
    SDValue &OutVal = OutVals[I];
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
    // Count the number of fixed args *after* legalization.
    NumFixedArgs += Out.IsFixed;
  }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
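    // (Illustrative: for a call like printf(fmt, i, d) with i:i32 and d:f64,
    // the two non-fixed args get offsets 0 and 8 in the stack buffer, and a
    // pointer to the buffer is passed as the final call operand below.)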
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      SDValue &Arg = OutVals[I];
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      unsigned Align = std::max(Out.Flags.getOrigAlign(),
                                Layout.getABITypeAlignment(Ty));
      unsigned Offset = CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty),
                                             Align);
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg :
         make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
                       MachinePointerInfo::getFixedStack(MF, FI, Offset), 0));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is), turn it into a TargetGlobalAddress node so that LowerGlobalAddress
    // doesn't add MO_GOT, which is not needed for direct calls.
    GlobalAddressSDNode* GA = cast<GlobalAddressSDNode>(Callee);
    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                        getPointerTy(DAG.getDataLayout()),
                                        GA->getOffset());
    Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
                         getPointerTy(DAG.getDataLayout()), Callee);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }

  if (CLI.IsTailCall) {
    // ret_calls do not return values to the current frame
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
  }

  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res =
      DAG.getNode(Ins.empty() ? WebAssemblyISD::CALL0 : WebAssemblyISD::CALL1,
                  DL, InTyList, Ops);
  if (Ins.empty()) {
    Chain = Res;
  } else {
    InVals.push_back(Res);
    Chain = Res.getValue(1);
  }

  return Chain;
}

bool WebAssemblyTargetLowering::CanLowerReturn(
    CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext & /*Context*/) const {
  // WebAssembly can't currently handle returning tuples.
  return Outs.size() <= 1;
}

SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {
  assert(Outs.size() <= 1 && "WebAssembly can only return up to one value");
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  SmallVector<SDValue, 4> RetOps(1, Chain);
  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  // Record the number and types of the return values.
  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
    assert(Out.IsFixed && "non-fixed return value is not valid");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}

SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
  // of the incoming values before they're represented by virtual registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  for (const ISD::InputArg &In : Ins) {
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getOrigAlign() because all our arguments are passed in
    // registers.
    InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                                           DAG.getTargetConstant(InVals.size(),
                                                                 DL, MVT::i32))
                             : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
  if (IsVarArg) {
    MVT PtrVT = getPointerTy(MF.getDataLayout());
    Register VarargVreg =
        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
    MFI->setVarargBufferVreg(VarargVreg);
    Chain = DAG.getCopyToReg(
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
    MFI->addParam(PtrVT);
  }

  // Record the number and types of arguments and results.
  SmallVector<MVT, 4> Params;
  SmallVector<MVT, 4> Results;
  computeSignatureVTs(MF.getFunction().getFunctionType(), MF.getFunction(),
                      DAG.getTarget(), Params, Results);
  for (MVT VT : Results)
    MFI->addResult(VT);
  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
  // the param logic here with ComputeSignatureVTs
  assert(MFI->getParams().size() == Params.size() &&
         std::equal(MFI->getParams().begin(), MFI->getParams().end(),
                    Params.begin()));

  return Chain;
}

void WebAssemblyTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Do not add any results, signifying that N should not be custom lowered
    // after all. This happens because simd128 turns on custom lowering for
    // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
    // illegal type.
    break;
  default:
    llvm_unreachable(
        "ReplaceNodeResults not implemented for this op for WebAssembly!");
  }
}

//===----------------------------------------------------------------------===//
//  Custom lowering hooks.
//===----------------------------------------------------------------------===//

SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unimplemented operation lowering");
    return SDValue();
  case ISD::FrameIndex:
    return LowerFrameIndex(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::BlockAddress:
  case ISD::BRIND:
    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
    return SDValue();
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::CopyToReg:
    return LowerCopyToReg(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
  case ISD::INSERT_VECTOR_ELT:
    return LowerAccessVectorElement(Op, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
    return LowerIntrinsic(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BUILD_VECTOR:
    return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return LowerShift(Op, DAG);
  }
}

SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(2);
  if (isa<FrameIndexSDNode>(Src.getNode())) {
    // CopyToReg nodes don't support FrameIndex operands. Other targets select
    // the FI to some LEA-like instruction, but since we don't have that, we
    // need to insert some kind of instruction that can take an FI operand and
    // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
    // local.copy between Op and its FI operand.
    SDValue Chain = Op.getOperand(0);
    SDLoc DL(Op);
    unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
    EVT VT = Src.getValueType();
    SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
                                                   : WebAssembly::COPY_I64,
                                    DL, VT, Src),
                 0);
    return Op.getNode()->getNumValues() == 1
               ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
               : DAG.getCopyToReg(Chain, DL, Reg, Copy,
                                  Op.getNumOperands() == 4 ? Op.getOperand(3)
                                                           : SDValue());
  }
  return SDValue();
}

SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
                                                   SelectionDAG &DAG) const {
  int FI = cast<FrameIndexSDNode>(Op)->getIndex();
  return DAG.getTargetFrameIndex(FI, Op.getValueType());
}

SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);

  if (!Subtarget->getTargetTriple().isOSEmscripten()) {
    fail(DL, DAG,
         "Non-Emscripten WebAssembly hasn't implemented "
         "__builtin_return_address");
    return SDValue();
  }

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
                     {DAG.getConstant(Depth, DL, MVT::i32)}, false, DL)
      .first;
}

SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // Non-zero depths are not supported by WebAssembly currently. Use the
  // legalizer's default expansion, which is to return 0 (what this function is
  // documented to do).
  if (Op.getConstantOperandVal(0) > 0)
    return SDValue();

  DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  Register FP =
      Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
}

SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *GA = cast<GlobalAddressSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(GA->getTargetFlags() == 0 &&
         "Unexpected target flags on generic GlobalAddressSDNode");
  if (GA->getAddressSpace() != 0)
    fail(DL, DAG, "WebAssembly only expects the 0 address space");

  unsigned OperandFlags = 0;
  if (isPositionIndependent()) {
    const GlobalValue *GV = GA->getGlobal();
    if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
      MachineFunction &MF = DAG.getMachineFunction();
      MVT PtrVT = getPointerTy(MF.getDataLayout());
      const char *BaseName;
      if (GV->getValueType()->isFunctionTy()) {
        BaseName = MF.createExternalSymbolName("__table_base");
        OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
      } else {
        BaseName = MF.createExternalSymbolName("__memory_base");
        OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
      }
      SDValue BaseAddr =
          DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
                      DAG.getTargetExternalSymbol(BaseName, PtrVT));

      SDValue SymAddr = DAG.getNode(
          WebAssemblyISD::WrapperPIC, DL, VT,
          DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
                                     OperandFlags));

      return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
    } else {
      OperandFlags = WebAssemblyII::MO_GOT;
    }
  }

  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
                                                GA->getOffset(), OperandFlags));
}

SDValue
WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *ES = cast<ExternalSymbolSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(ES->getTargetFlags() == 0 &&
         "Unexpected target flags on generic ExternalSymbolSDNode");
  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
}

SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // There's no need for a Wrapper node because we always incorporate a jump
  // table operand into a BR_TABLE instruction, rather than ever
  // materializing it in a register.
  const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
                                JT->getTargetFlags());
}

SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
  SDValue Index = Op.getOperand(2);
  assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Index);

  MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
  const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;

  // Add an operand for each case.
  for (auto MBB : MBBs)
    Ops.push_back(DAG.getBasicBlock(MBB));

  // TODO: For now, we just pick something arbitrary for the default case. We
  // really want to sniff out the guard and put in the real default case (and
  // delete the guard).
  Ops.push_back(DAG.getBasicBlock(MBBs[0]));

  return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
}

SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());

  auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
                                    MFI->getVarargBufferVreg(), PtrVT);
  return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
                      MachinePointerInfo(SV), 0);
}

SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned IntNo;
  switch (Op.getOpcode()) {
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN:
    IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    break;
  case ISD::INTRINSIC_WO_CHAIN:
    IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    break;
  default:
    llvm_unreachable("Invalid intrinsic");
  }
  SDLoc DL(Op);

  switch (IntNo) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.

  case Intrinsic::wasm_lsda: {
    EVT VT = Op.getValueType();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    auto &Context = MF.getMMI().getContext();
    MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
                                            Twine(MF.getFunctionNumber()));
    return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                       DAG.getMCSymbol(S, PtrVT));
  }

  case Intrinsic::wasm_throw: {
    // We only support C++ exceptions for now
    int Tag = cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
    if (Tag != CPP_EXCEPTION)
      llvm_unreachable("Invalid tag!");
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    const char *SymName = MF.createExternalSymbolName("__cpp_exception");
    SDValue SymNode = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
                                  DAG.getTargetExternalSymbol(SymName, PtrVT));
    return DAG.getNode(WebAssemblyISD::THROW, DL,
                       MVT::Other, // outchain type
                       {
                           Op.getOperand(0), // inchain
                           SymNode,          // exception symbol
                           Op.getOperand(3)  // thrown value
                       });
  }
  }
}

SDValue
WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // If sign extension operations are disabled, allow sext_inreg only if operand
  // is a vector extract. SIMD does not depend on sign extension operations, but
  // allowing sext_inreg in this context lets us have simple patterns to select
  // extract_lane_s instructions. Expanding sext_inreg everywhere would be
  // simpler in this file, but would necessitate large and brittle patterns to
  // undo the expansion and select extract_lane_s instructions.
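  //
  // For example (illustrative): (sext_inreg (extract_vector_elt v4i32:$v, 1),
  // i8) is rewritten below as an extract from (bitcast $v to v16i8) at lane
  // 1 * (16 / 4) = 4, which then matches the i8x16.extract_lane_s pattern.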
  assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
  if (Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    const SDValue &Extract = Op.getOperand(0);
    MVT VecT = Extract.getOperand(0).getSimpleValueType();
    MVT ExtractedLaneT =
        cast<VTSDNode>(Op.getOperand(1))->getVT().getSimpleVT();
    MVT ExtractedVecT =
        MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
    if (ExtractedVecT == VecT)
      return Op;
    // Bitcast vector to appropriate type to ensure ISel pattern coverage
    const SDValue &Index = Extract.getOperand(1);
    unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
    unsigned Scale =
        ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
    assert(Scale > 1);
    SDValue NewIndex =
        DAG.getConstant(IndexVal * Scale, DL, Index.getValueType());
    SDValue NewExtract = DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
        DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(),
                       NewExtract, Op.getOperand(1));
  }
  // Otherwise expand
  return SDValue();
}

SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const EVT VecT = Op.getValueType();
  const EVT LaneT = Op.getOperand(0).getValueType();
  const size_t Lanes = Op.getNumOperands();
  auto IsConstant = [](const SDValue &V) {
    return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
  };

  // Find the most common operand, which is approximately the best value to
  // splat
  using Entry = std::pair<SDValue, size_t>;
  SmallVector<Entry, 16> ValueCounts;
  size_t NumConst = 0, NumDynamic = 0;
  for (const SDValue &Lane : Op->op_values()) {
    if (Lane.isUndef()) {
      continue;
    } else if (IsConstant(Lane)) {
      NumConst++;
    } else {
      NumDynamic++;
    }
    auto CountIt = std::find_if(ValueCounts.begin(), ValueCounts.end(),
                                [&Lane](Entry A) { return A.first == Lane; });
    if (CountIt == ValueCounts.end()) {
      ValueCounts.emplace_back(Lane, 1);
    } else {
      CountIt->second++;
    }
  }
  auto CommonIt =
      std::max_element(ValueCounts.begin(), ValueCounts.end(),
                       [](Entry A, Entry B) { return A.second < B.second; });
  assert(CommonIt != ValueCounts.end() && "Unexpected all-undef build_vector");
  SDValue SplatValue = CommonIt->first;
  size_t NumCommon = CommonIt->second;

  // If v128.const is available, consider using it instead of a splat
  if (Subtarget->hasUnimplementedSIMD128()) {
    // {i32,i64,f32,f64}.const opcode, and value
    const size_t ConstBytes = 1 + std::max(size_t(4), 16 / Lanes);
    // SIMD prefix and opcode
    const size_t SplatBytes = 2;
    const size_t SplatConstBytes = SplatBytes + ConstBytes;
    // SIMD prefix, opcode, and lane index
    const size_t ReplaceBytes = 3;
    const size_t ReplaceConstBytes = ReplaceBytes + ConstBytes;
    // SIMD prefix, v128.const opcode, and 128-bit value
    const size_t VecConstBytes = 18;
    // Initial v128.const and a replace_lane for each non-const operand
    const size_t ConstInitBytes = VecConstBytes + NumDynamic * ReplaceBytes;
    // Initial splat and all necessary replace_lanes
    const size_t SplatInitBytes =
        IsConstant(SplatValue)
            // Initial constant splat
            ? (SplatConstBytes +
               // Constant replace_lanes
               (NumConst - NumCommon) * ReplaceConstBytes +
               // Dynamic replace_lanes
               (NumDynamic * ReplaceBytes))
            // Initial dynamic splat
            : (SplatBytes +
               // Constant replace_lanes
               (NumConst * ReplaceConstBytes) +
               // Dynamic replace_lanes
               (NumDynamic - NumCommon) * ReplaceBytes);
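    // For example, a v4i32 build_vector of four distinct constants costs
    // ConstInitBytes = 18 + 0 * 3 = 18 bytes as a v128.const, but
    // SplatInitBytes = 7 + 3 * 8 + 0 * 3 = 31 bytes as a splat followed by
    // three constant replace_lanes, so the v128.const lowering is chosen.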
    if (ConstInitBytes < SplatInitBytes) {
      // Create build_vector that will lower to initial v128.const
      SmallVector<SDValue, 16> ConstLanes;
      for (const SDValue &Lane : Op->op_values()) {
        if (IsConstant(Lane)) {
          ConstLanes.push_back(Lane);
        } else if (LaneT.isFloatingPoint()) {
          ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
        } else {
          ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
        }
      }
      SDValue Result = DAG.getBuildVector(VecT, DL, ConstLanes);
      // Add replace_lane instructions for non-const lanes
      for (size_t I = 0; I < Lanes; ++I) {
        const SDValue &Lane = Op->getOperand(I);
        if (!Lane.isUndef() && !IsConstant(Lane))
          Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
                               DAG.getConstant(I, DL, MVT::i32));
      }
      return Result;
    }
  }
  // Use a splat for the initial vector
  SDValue Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
  // Add replace_lane instructions for other values
  for (size_t I = 0; I < Lanes; ++I) {
    const SDValue &Lane = Op->getOperand(I);
    if (Lane != SplatValue)
      Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
                           DAG.getConstant(I, DL, MVT::i32));
  }
  return Result;
}

SDValue
WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
  MVT VecType = Op.getOperand(0).getSimpleValueType();
  assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
  size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;

  // Space for two vector args and sixteen mask indices
  SDValue Ops[18];
  size_t OpIdx = 0;
  Ops[OpIdx++] = Op.getOperand(0);
  Ops[OpIdx++] = Op.getOperand(1);

  // Expand mask indices to byte indices and materialize them as operands
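  // For example, a v4i32 mask of <0, 5, -1, 7> expands to the byte indices
  // 0,1,2,3, 20,21,22,23, 0,0,0,0, 28,29,30,31.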
  for (int M : Mask) {
    for (size_t J = 0; J < LaneBytes; ++J) {
      // Lower undefs (represented by -1 in mask) to zero
      uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
      Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
    }
  }

  return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
}

SDValue
WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
                                                    SelectionDAG &DAG) const {
  // Allow constant lane indices, expand variable lane indices
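  // WebAssembly's extract_lane and replace_lane instructions encode the lane
  // index as an immediate, so only constant indices can be selected directly.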
  SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
  if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
    return Op;
  else
    // Perform default expansion
    return SDValue();
}

static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
  EVT LaneT = Op.getSimpleValueType().getVectorElementType();
  // 32-bit and 64-bit unrolled shifts will have proper semantics
  if (LaneT.bitsGE(MVT::i32))
    return DAG.UnrollVectorOp(Op.getNode());
  // Otherwise mask the shift value to get proper semantics from 32-bit shift
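  // For example, for an i8x16 shift the amounts are masked with MaskVal = 7,
  // so an amount of 10 becomes 10 & 7 = 2, matching the modulo-lane-width
  // behavior of the SIMD shift instructions once the lanes are unrolled to
  // 32-bit scalar shifts.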
  SDLoc DL(Op);
  SDValue ShiftVal = Op.getOperand(1);
  uint64_t MaskVal = LaneT.getSizeInBits() - 1;
  SDValue MaskedShiftVal = DAG.getNode(
      ISD::AND,                    // mask opcode
      DL, ShiftVal.getValueType(), // masked value type
      ShiftVal,                    // original shift value operand
      DAG.getConstant(MaskVal, DL, ShiftVal.getValueType()) // mask operand
  );

  return DAG.UnrollVectorOp(
      DAG.getNode(Op.getOpcode(),        // original shift opcode
                  DL, Op.getValueType(), // original return type
                  Op.getOperand(0),      // original vector operand
                  MaskedShiftVal         // new masked shift value operand
                  )
          .getNode());
}

SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);

  // Only manually lower vector shifts
  assert(Op.getSimpleValueType().isVector());

  // Unroll non-splat vector shifts
  BuildVectorSDNode *ShiftVec;
  SDValue SplatVal;
  if (!(ShiftVec = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode())) ||
      !(SplatVal = ShiftVec->getSplatValue()))
    return unrollVectorShift(Op, DAG);

  // All splats except i64x2 const splats are handled by patterns
  auto *SplatConst = dyn_cast<ConstantSDNode>(SplatVal);
  if (!SplatConst || Op.getSimpleValueType() != MVT::v2i64)
    return Op;

  // i64x2 const splats are custom lowered to avoid unnecessary wraps
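  // The splatted shift amount is truncated to an i32 constant operand of the
  // target shift node below, so no explicit i64-to-i32 wrap is needed for the
  // shift count.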
  unsigned Opcode;
  switch (Op.getOpcode()) {
  case ISD::SHL:
    Opcode = WebAssemblyISD::VEC_SHL;
    break;
  case ISD::SRA:
    Opcode = WebAssemblyISD::VEC_SHR_S;
    break;
  case ISD::SRL:
    Opcode = WebAssemblyISD::VEC_SHR_U;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }
  APInt Shift = SplatConst->getAPIntValue().zextOrTrunc(32);
  return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0),
                     DAG.getConstant(Shift, DL, MVT::i32));
}

//===----------------------------------------------------------------------===//
//                          WebAssembly Optimization Hooks
//===----------------------------------------------------------------------===//
